diff --git a/Package.swift b/Package.swift index 281ebf71e7..522931580a 100644 --- a/Package.swift +++ b/Package.swift @@ -113,6 +113,7 @@ let package = Package( .library(name: "SotoDataExchange", targets: ["SotoDataExchange"]), .library(name: "SotoDataPipeline", targets: ["SotoDataPipeline"]), .library(name: "SotoDataSync", targets: ["SotoDataSync"]), + .library(name: "SotoDataZone", targets: ["SotoDataZone"]), .library(name: "SotoDatabaseMigrationService", targets: ["SotoDatabaseMigrationService"]), .library(name: "SotoDetective", targets: ["SotoDetective"]), .library(name: "SotoDevOpsGuru", targets: ["SotoDevOpsGuru"]), @@ -475,6 +476,7 @@ let package = Package( .target(name: "SotoDataExchange", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DataExchange"), .target(name: "SotoDataPipeline", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DataPipeline"), .target(name: "SotoDataSync", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DataSync"), + .target(name: "SotoDataZone", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DataZone"), .target(name: "SotoDatabaseMigrationService", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DatabaseMigrationService"), .target(name: "SotoDetective", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Detective"), .target(name: "SotoDevOpsGuru", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DevOpsGuru"), diff --git a/Sources/Soto/Services/AppConfig/AppConfig_shapes.swift b/Sources/Soto/Services/AppConfig/AppConfig_shapes.swift index 150ccbb5f9..37ac02ac50 100644 --- a/Sources/Soto/Services/AppConfig/AppConfig_shapes.swift +++ b/Sources/Soto/Services/AppConfig/AppConfig_shapes.swift @@ -266,6 +266,10 @@ extension AppConfig { public let description: String? /// The configuration profile ID. public let id: String? + /// The Amazon Resource Name of the Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service. + public let kmsKeyArn: String? + /// The Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated. + public let kmsKeyIdentifier: String? /// The URI location of the configuration. public let locationUri: String? /// The name of the configuration profile. @@ -277,10 +281,12 @@ extension AppConfig { /// A list of methods for validating the configuration. public let validators: [Validator]? - public init(applicationId: String? = nil, description: String? = nil, id: String? = nil, locationUri: String? = nil, name: String? = nil, retrievalRoleArn: String? = nil, type: String? = nil, validators: [Validator]? = nil) { + public init(applicationId: String? = nil, description: String? = nil, id: String? = nil, kmsKeyArn: String? = nil, kmsKeyIdentifier: String? = nil, locationUri: String? = nil, name: String? = nil, retrievalRoleArn: String? = nil, type: String? = nil, validators: [Validator]?
= nil) { self.applicationId = applicationId self.description = description self.id = id + self.kmsKeyArn = kmsKeyArn + self.kmsKeyIdentifier = kmsKeyIdentifier self.locationUri = locationUri self.name = name self.retrievalRoleArn = retrievalRoleArn @@ -292,6 +298,8 @@ extension AppConfig { case applicationId = "ApplicationId" case description = "Description" case id = "Id" + case kmsKeyArn = "KmsKeyArn" + case kmsKeyIdentifier = "KmsKeyIdentifier" case locationUri = "LocationUri" case name = "Name" case retrievalRoleArn = "RetrievalRoleArn" @@ -392,6 +400,8 @@ extension AppConfig { public let applicationId: String /// A description of the configuration profile. public let description: String? + /// The identifier for a Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. The identifier can be a KMS key ID, alias, or the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service. + public let kmsKeyIdentifier: String? /// A URI to locate the configuration. You can specify the following: For the AppConfig hosted configuration store and for feature flags, specify hosted. For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter name in the format ssm-parameter:// or the ARN. For an Amazon Web Services CodePipeline pipeline, specify the URI in the following format: codepipeline://. For a Secrets Manager secret, specify the URI in the following format: secretsmanager://. For an Amazon S3 object, specify the URI in the following format: s3:/// . Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json For an SSM document, specify either the document name in the format ssm-document:// or the Amazon Resource Name (ARN). public let locationUri: String /// A name for the configuration profile. @@ -405,9 +415,10 @@ extension AppConfig { /// A list of methods for validating the configuration. public let validators: [Validator]? - public init(applicationId: String, description: String? = nil, locationUri: String, name: String, retrievalRoleArn: String? = nil, tags: [String: String]? = nil, type: String? = nil, validators: [Validator]? = nil) { + public init(applicationId: String, description: String? = nil, kmsKeyIdentifier: String? = nil, locationUri: String, name: String, retrievalRoleArn: String? = nil, tags: [String: String]? = nil, type: String? = nil, validators: [Validator]?
= nil) { self.applicationId = applicationId self.description = description + self.kmsKeyIdentifier = kmsKeyIdentifier self.locationUri = locationUri self.name = name self.retrievalRoleArn = retrievalRoleArn @@ -419,6 +430,7 @@ extension AppConfig { public func validate(name: String) throws { try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[a-z0-9]{4,7}$") try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}|alias/[a-zA-Z0-9/_-]{1,250}|arn:aws[a-zA-Z-]*:kms:[a-z]{2}(-gov|-iso(b?))?-[a-z]+-\\d{1}:\\d{12}:(key/[0-9a-f-]{36}|alias/[a-zA-Z0-9/_-]{1,250})$") try self.validate(self.locationUri, name: "locationUri", parent: name, max: 2048) try self.validate(self.locationUri, name: "locationUri", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 128) @@ -441,6 +453,7 @@ extension AppConfig { private enum CodingKeys: String, CodingKey { case description = "Description" + case kmsKeyIdentifier = "KmsKeyIdentifier" case locationUri = "LocationUri" case name = "Name" case retrievalRoleArn = "RetrievalRoleArn" @@ -582,8 +595,7 @@ extension AppConfig { try self.validate(self.extensionIdentifier, name: "extensionIdentifier", parent: name, max: 2048) try self.validate(self.extensionIdentifier, name: "extensionIdentifier", parent: name, min: 1) try self.parameters?.forEach { - try validate($0.key, name: "parameters.key", parent: name, max: 64) - try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[^\\/#:\\n]{1,64}$") try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, max: 2048) try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, min: 1) } @@ -642,11 +654,9 @@ extension AppConfig { try self.validate(self.actions, name: "actions", parent: name, max: 5) try self.validate(self.actions, name: "actions", parent: name, min: 1) try self.validate(self.description, name: "description", parent: name, max: 1024) - try self.validate(self.name, name: "name", parent: name, max: 64) - try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\/#:\\n]{1,64}$") try self.parameters?.forEach { - try validate($0.key, name: "parameters.key", parent: name, max: 64) - try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[^\\/#:\\n]{1,64}$") try $0.value.validate(name: "\(name).parameters[\"\($0.key)\"]") } try self.validate(self.parameters, name: "parameters", parent: name, max: 5) @@ -913,7 +923,7 @@ extension AppConfig { public let growthType: GrowthType? /// The Amazon Resource Name of the Key Management Service key used to encrypt configuration data. You can encrypt secrets stored in Secrets Manager, Amazon Simple Storage Service (Amazon S3) objects encrypted with SSE-KMS, or secure string parameters stored in Amazon Web Services Systems Manager Parameter Store. public let kmsKeyArn: String? - /// The KMS key identifier (key ID, key alias, or key ARN). AppConfig uses this ID to encrypt the configuration data using a customer managed key. + /// The Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated. 
public let kmsKeyIdentifier: String? /// The percentage of targets for which the deployment is available. public let percentageComplete: Float? @@ -1583,6 +1593,7 @@ extension AppConfig { AWSMemberEncoding(label: "content", location: .body("Content")), AWSMemberEncoding(label: "contentType", location: .header("Content-Type")), AWSMemberEncoding(label: "description", location: .header("Description")), + AWSMemberEncoding(label: "kmsKeyArn", location: .header("KmsKeyArn")), AWSMemberEncoding(label: "versionLabel", location: .header("VersionLabel")), AWSMemberEncoding(label: "versionNumber", location: .header("Version-Number")) ] @@ -1597,17 +1608,20 @@ extension AppConfig { public let contentType: String? /// A description of the configuration. public let description: String? + /// The Amazon Resource Name of the Key Management Service key that was used to encrypt this specific version of the configuration data in the AppConfig hosted configuration store. + public let kmsKeyArn: String? /// A user-defined label for an AppConfig hosted configuration version. public let versionLabel: String? /// The configuration version. public let versionNumber: Int? - public init(applicationId: String? = nil, configurationProfileId: String? = nil, content: AWSPayload? = nil, contentType: String? = nil, description: String? = nil, versionLabel: String? = nil, versionNumber: Int? = nil) { + public init(applicationId: String? = nil, configurationProfileId: String? = nil, content: AWSPayload? = nil, contentType: String? = nil, description: String? = nil, kmsKeyArn: String? = nil, versionLabel: String? = nil, versionNumber: Int? = nil) { self.applicationId = applicationId self.configurationProfileId = configurationProfileId self.content = content self.contentType = contentType self.description = description + self.kmsKeyArn = kmsKeyArn self.versionLabel = versionLabel self.versionNumber = versionNumber } @@ -1618,6 +1632,7 @@ extension AppConfig { case content = "Content" case contentType = "Content-Type" case description = "Description" + case kmsKeyArn = "KmsKeyArn" case versionLabel = "VersionLabel" case versionNumber = "Version-Number" } @@ -1632,16 +1647,19 @@ extension AppConfig { public let contentType: String? /// A description of the configuration. public let description: String? + /// The Amazon Resource Name of the Key Management Service key that was used to encrypt this specific version of the configuration data in the AppConfig hosted configuration store. + public let kmsKeyArn: String? /// A user-defined label for an AppConfig hosted configuration version. public let versionLabel: String? /// The configuration version. public let versionNumber: Int? - public init(applicationId: String? = nil, configurationProfileId: String? = nil, contentType: String? = nil, description: String? = nil, versionLabel: String? = nil, versionNumber: Int? = nil) { + public init(applicationId: String? = nil, configurationProfileId: String? = nil, contentType: String? = nil, description: String? = nil, kmsKeyArn: String? = nil, versionLabel: String? = nil, versionNumber: Int? 
= nil) { self.applicationId = applicationId self.configurationProfileId = configurationProfileId self.contentType = contentType self.description = description + self.kmsKeyArn = kmsKeyArn self.versionLabel = versionLabel self.versionNumber = versionNumber } @@ -1651,6 +1669,7 @@ case configurationProfileId = "ConfigurationProfileId" case contentType = "ContentType" case description = "Description" + case kmsKeyArn = "KmsKeyArn" case versionLabel = "VersionLabel" case versionNumber = "VersionNumber" } @@ -2067,8 +2086,7 @@ extension AppConfig { try self.validate(self.deploymentStrategyId, name: "deploymentStrategyId", parent: name, pattern: "^(^[a-z0-9]{4,7}$|^AppConfig\\.[A-Za-z0-9]{9,40}$)$") try self.validate(self.description, name: "description", parent: name, max: 1024) try self.validate(self.environmentId, name: "environmentId", parent: name, pattern: "^[a-z0-9]{4,7}$") - try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, max: 2048) - try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}|alias/[a-zA-Z0-9/_-]{1,250}|arn:aws[a-zA-Z-]*:kms:[a-z]{2}(-gov|-iso(b?))?-[a-z]+-\\d{1}:\\d{12}:(key/[0-9a-f-]{36}|alias/[a-zA-Z0-9/_-]{1,250})$") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -2220,6 +2238,8 @@ extension AppConfig { public let configurationProfileId: String /// A description of the configuration profile. public let description: String? + /// The identifier for a Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. The identifier can be a KMS key ID, alias, or the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service. + public let kmsKeyIdentifier: String? /// The name of the configuration profile. public let name: String? /// The ARN of an IAM role with permission to access the configuration at the specified LocationUri. @@ -2227,10 +2247,11 @@ extension AppConfig { /// A list of methods for validating the configuration. public let validators: [Validator]? - public init(applicationId: String, configurationProfileId: String, description: String? = nil, name: String? = nil, retrievalRoleArn: String? = nil, validators: [Validator]? = nil) { + public init(applicationId: String, configurationProfileId: String, description: String? = nil, kmsKeyIdentifier: String? = nil, name: String? = nil, retrievalRoleArn: String? = nil, validators: [Validator]?
= nil) { self.applicationId = applicationId self.configurationProfileId = configurationProfileId self.description = description + self.kmsKeyIdentifier = kmsKeyIdentifier self.name = name self.retrievalRoleArn = retrievalRoleArn self.validators = validators @@ -2240,6 +2261,7 @@ extension AppConfig { try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[a-z0-9]{4,7}$") try self.validate(self.configurationProfileId, name: "configurationProfileId", parent: name, pattern: "^[a-z0-9]{4,7}$") try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^\\s{0,1}|[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}|alias/[a-zA-Z0-9/_-]{1,250}|arn:aws[a-zA-Z-]*:kms:[a-z]{2}(-gov|-iso(b?))?-[a-z]+-\\d{1}:\\d{12}:(key/[0-9a-f-]{36}|alias/[a-zA-Z0-9/_-]{1,250})$") try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.retrievalRoleArn, name: "retrievalRoleArn", parent: name, max: 2048) @@ -2253,6 +2275,7 @@ extension AppConfig { private enum CodingKeys: String, CodingKey { case description = "Description" + case kmsKeyIdentifier = "KmsKeyIdentifier" case name = "Name" case retrievalRoleArn = "RetrievalRoleArn" case validators = "Validators" @@ -2368,8 +2391,7 @@ extension AppConfig { public func validate(name: String) throws { try self.validate(self.extensionAssociationId, name: "extensionAssociationId", parent: name, pattern: "^[a-z0-9]{4,7}$") try self.parameters?.forEach { - try validate($0.key, name: "parameters.key", parent: name, max: 64) - try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[^\\/#:\\n]{1,64}$") try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, max: 2048) try validate($0.value, name: "parameters[\"\($0.key)\"]", parent: name, min: 1) } @@ -2416,8 +2438,7 @@ extension AppConfig { try self.validate(self.extensionIdentifier, name: "extensionIdentifier", parent: name, max: 2048) try self.validate(self.extensionIdentifier, name: "extensionIdentifier", parent: name, min: 1) try self.parameters?.forEach { - try validate($0.key, name: "parameters.key", parent: name, max: 64) - try validate($0.key, name: "parameters.key", parent: name, min: 1) + try validate($0.key, name: "parameters.key", parent: name, pattern: "^[^\\/#:\\n]{1,64}$") try $0.value.validate(name: "\(name).parameters[\"\($0.key)\"]") } try self.validate(self.parameters, name: "parameters", parent: name, max: 5) diff --git a/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift b/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift index 6835845b8e..5c9d323577 100644 --- a/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift +++ b/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift @@ -3993,6 +3993,8 @@ extension AuditManager { try self.awsAccounts?.forEach { try $0.validate(name: "\(name).awsAccounts[]") } + try self.validate(self.awsAccounts, name: "awsAccounts", parent: name, max: 200) + try self.validate(self.awsAccounts, name: "awsAccounts", parent: name, min: 1) try self.awsServices?.forEach { try $0.validate(name: "\(name).awsServices[]") } diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_api+async.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_api+async.swift index dacea64a7a..87cd35ed18 100644 --- 
a/Sources/Soto/Services/AutoScaling/AutoScaling_api+async.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_api+async.swift @@ -395,6 +395,28 @@ extension AutoScaling { ) } + /// Gets information about the instance refreshes for the specified Auto Scaling group. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes. To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func describeInstanceRefreshesPaginator( + _ input: DescribeInstanceRefreshesType, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeInstanceRefreshes, + inputKey: \DescribeInstanceRefreshesType.nextToken, + outputKey: \DescribeInstanceRefreshesAnswer.nextToken, + logger: logger, + on: eventLoop + ) + } + /// Gets information about the launch configurations in the account and Region. /// Return PaginatorSequence for operation. /// @@ -417,6 +439,50 @@ extension AutoScaling { ) } + /// This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic sources types. We recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancerTargetGroups. You can use both the original DescribeLoadBalancerTargetGroups API operation and DescribeTrafficSources on the same Auto Scaling group. Gets information about the Elastic Load Balancing target groups for the specified Auto Scaling group. To determine the attachment status of the target group, use the State element in the response. When you attach a target group to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after all Auto Scaling instances are registered with the target group. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance passes the health check. When the target group is in the InService state, Amazon EC2 Auto Scaling can terminate and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the target group doesn't enter the InService state. Target groups also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your target group state is InService, but it is not working properly, check the scaling activities by calling DescribeScalingActivities and take any corrective actions necessary. For help with failed health checks, see Troubleshooting Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. 
For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. You can use this operation to describe target groups that were attached by using AttachLoadBalancerTargetGroups, but not for target groups that were attached by using AttachTrafficSources. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func describeLoadBalancerTargetGroupsPaginator( + _ input: DescribeLoadBalancerTargetGroupsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeLoadBalancerTargetGroups, + inputKey: \DescribeLoadBalancerTargetGroupsRequest.nextToken, + outputKey: \DescribeLoadBalancerTargetGroupsResponse.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic sources types. We recommend using DescribeTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancers. You can use both the original DescribeLoadBalancers API operation and DescribeTrafficSources on the same Auto Scaling group. Gets information about the load balancers for the specified Auto Scaling group. This operation describes only Classic Load Balancers. If you have Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead. To determine the attachment status of the load balancer, use the State element in the response. When you attach a load balancer to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after all Auto Scaling instances are registered with the load balancer. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance passes the health check. When the load balancer is in the InService state, Amazon EC2 Auto Scaling can terminate and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the load balancer doesn't enter the InService state. Load balancers also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your load balancer state is InService, but it is not working properly, check the scaling activities by calling DescribeScalingActivities and take any corrective actions necessary. For help with failed health checks, see Troubleshooting Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func describeLoadBalancersPaginator( + _ input: DescribeLoadBalancersRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeLoadBalancers, + inputKey: \DescribeLoadBalancersRequest.nextToken, + outputKey: \DescribeLoadBalancersResponse.nextToken, + logger: logger, + on: eventLoop + ) + } + /// Gets information about the Amazon SNS notifications that are configured for one or more Auto Scaling groups. /// Return PaginatorSequence for operation. /// diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift index ebc9bf8ebb..a019f9339d 100644 --- a/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift @@ -514,6 +514,59 @@ extension AutoScaling { ) } + /// Gets information about the instance refreshes for the specified Auto Scaling group. This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes. To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func describeInstanceRefreshesPaginator( + _ input: DescribeInstanceRefreshesType, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, DescribeInstanceRefreshesAnswer, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.describeInstanceRefreshes, + inputKey: \DescribeInstanceRefreshesType.nextToken, + outputKey: \DescribeInstanceRefreshesAnswer.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func describeInstanceRefreshesPaginator( + _ input: DescribeInstanceRefreshesType, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (DescribeInstanceRefreshesAnswer, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.describeInstanceRefreshes, + inputKey: \DescribeInstanceRefreshesType.nextToken, + outputKey: \DescribeInstanceRefreshesAnswer.nextToken, + on: eventLoop, + onPage: onPage + ) + } + /// Gets information about the launch configurations in the account and Region. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. @@ -567,6 +620,112 @@ extension AutoScaling { ) } + /// This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic sources types. We recommend using DetachTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancerTargetGroups. You can use both the original DescribeLoadBalancerTargetGroups API operation and DescribeTrafficSources on the same Auto Scaling group. Gets information about the Elastic Load Balancing target groups for the specified Auto Scaling group. To determine the attachment status of the target group, use the State element in the response. When you attach a target group to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after all Auto Scaling instances are registered with the target group. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance passes the health check. When the target group is in the InService state, Amazon EC2 Auto Scaling can terminate and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the target group doesn't enter the InService state. Target groups also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your target group state is InService, but it is not working properly, check the scaling activities by calling DescribeScalingActivities and take any corrective actions necessary. For help with failed health checks, see Troubleshooting Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. You can use this operation to describe target groups that were attached by using AttachLoadBalancerTargetGroups, but not for target groups that were attached by using AttachTrafficSources. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func describeLoadBalancerTargetGroupsPaginator( + _ input: DescribeLoadBalancerTargetGroupsRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (Result, DescribeLoadBalancerTargetGroupsResponse, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.describeLoadBalancerTargetGroups, + inputKey: \DescribeLoadBalancerTargetGroupsRequest.nextToken, + outputKey: \DescribeLoadBalancerTargetGroupsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func describeLoadBalancerTargetGroupsPaginator( + _ input: DescribeLoadBalancerTargetGroupsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (DescribeLoadBalancerTargetGroupsResponse, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.describeLoadBalancerTargetGroups, + inputKey: \DescribeLoadBalancerTargetGroupsRequest.nextToken, + outputKey: \DescribeLoadBalancerTargetGroupsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// This API operation is superseded by DescribeTrafficSources, which can describe multiple traffic sources types. We recommend using DescribeTrafficSources to simplify how you manage traffic sources. However, we continue to support DescribeLoadBalancers. You can use both the original DescribeLoadBalancers API operation and DescribeTrafficSources on the same Auto Scaling group. Gets information about the load balancers for the specified Auto Scaling group. This operation describes only Classic Load Balancers. If you have Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead. To determine the attachment status of the load balancer, use the State element in the response. When you attach a load balancer to an Auto Scaling group, the initial State value is Adding. The state transitions to Added after all Auto Scaling instances are registered with the load balancer. If Elastic Load Balancing health checks are enabled for the Auto Scaling group, the state transitions to InService after at least one Auto Scaling instance passes the health check. When the load balancer is in the InService state, Amazon EC2 Auto Scaling can terminate and replace any instances that are reported as unhealthy. If no registered instances pass the health checks, the load balancer doesn't enter the InService state. Load balancers also have an InService state if you attach them in the CreateAutoScalingGroup API call. If your load balancer state is InService, but it is not working properly, check the scaling activities by calling DescribeScalingActivities and take any corrective actions necessary. For help with failed health checks, see Troubleshooting Amazon EC2 Auto Scaling: Health checks in the Amazon EC2 Auto Scaling User Guide. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. 
+ /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func describeLoadBalancersPaginator( + _ input: DescribeLoadBalancersRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, DescribeLoadBalancersResponse, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.describeLoadBalancers, + inputKey: \DescribeLoadBalancersRequest.nextToken, + outputKey: \DescribeLoadBalancersResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func describeLoadBalancersPaginator( + _ input: DescribeLoadBalancersRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (DescribeLoadBalancersResponse, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.describeLoadBalancers, + inputKey: \DescribeLoadBalancersRequest.nextToken, + outputKey: \DescribeLoadBalancersResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + /// Gets information about the Amazon SNS notifications that are configured for one or more Auto Scaling groups. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. 
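// ---- Editor's note: illustrative usage sketch, not part of the upstream patch ----
// A minimal example of consuming the new DescribeInstanceRefreshes paginator added in this
// patch through its async PaginatorSequence; the EventLoopFuture `onPage` variants above work
// the same way on pre-concurrency toolchains. The client setup, region, and Auto Scaling
// group name below are placeholder assumptions, not values taken from this diff.
import SotoAutoScaling

func monitorInstanceRefreshes() async throws {
    let client = AWSClient(credentialProvider: .default, httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let autoScaling = AutoScaling(client: client, region: .useast1)
    let request = AutoScaling.DescribeInstanceRefreshesType(autoScalingGroupName: "my-asg")
    // Each element is one DescribeInstanceRefreshesAnswer page; nextToken handling is automatic.
    for try await page in autoScaling.describeInstanceRefreshesPaginator(request) {
        for refresh in page.instanceRefreshes ?? [] {
            // An InstanceRefresh carries the status, start/end time, and completion percentage
            // described in the doc comment above.
            print(refresh)
        }
    }
}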
@@ -960,6 +1119,37 @@ extension AutoScaling.DescribeAutoScalingInstancesType: AWSPaginateToken { } } +extension AutoScaling.DescribeInstanceRefreshesType: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AutoScaling.DescribeInstanceRefreshesType { + return .init( + autoScalingGroupName: self.autoScalingGroupName, + instanceRefreshIds: self.instanceRefreshIds, + maxRecords: self.maxRecords, + nextToken: token + ) + } +} + +extension AutoScaling.DescribeLoadBalancerTargetGroupsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AutoScaling.DescribeLoadBalancerTargetGroupsRequest { + return .init( + autoScalingGroupName: self.autoScalingGroupName, + maxRecords: self.maxRecords, + nextToken: token + ) + } +} + +extension AutoScaling.DescribeLoadBalancersRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AutoScaling.DescribeLoadBalancersRequest { + return .init( + autoScalingGroupName: self.autoScalingGroupName, + maxRecords: self.maxRecords, + nextToken: token + ) + } +} + extension AutoScaling.DescribeNotificationConfigurationsType: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> AutoScaling.DescribeNotificationConfigurationsType { return .init( diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift index 4343742421..bc69c9bbff 100644 --- a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift @@ -3507,9 +3507,9 @@ extension AutoScaling { try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, max: 255) try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, min: 1) try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, pattern: "^[A-Za-z0-9\\-_\\/]+$") - try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, max: 1023) + try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, max: 4000) try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, min: 1) - try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") + try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, pattern: "^[\\u0009\\u000A\\u000D\\u0020-\\u007e]+$") try self.validate(self.notificationTargetARN, name: "notificationTargetARN", parent: name, max: 255) try self.validate(self.notificationTargetARN, name: "notificationTargetARN", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.validate(self.roleARN, name: "roleARN", parent: name, max: 255) @@ -4189,9 +4189,9 @@ extension AutoScaling { try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, max: 255) try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, min: 1) try self.validate(self.lifecycleHookName, name: "lifecycleHookName", parent: name, pattern: "^[A-Za-z0-9\\-_\\/]+$") - try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, max: 1023) + try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, max: 4000) try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, min: 1) - try 
self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") + try self.validate(self.notificationMetadata, name: "notificationMetadata", parent: name, pattern: "^[\\u0009\\u000A\\u000D\\u0020-\\u007e]+$") try self.validate(self.notificationTargetARN, name: "notificationTargetARN", parent: name, max: 255) try self.validate(self.notificationTargetARN, name: "notificationTargetARN", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.validate(self.roleARN, name: "roleARN", parent: name, max: 255) diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 03eae47cab..66fda55b4e 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -55,9 +55,11 @@ public struct Bedrock: AWSService { apiVersion: "2023-04-20", endpoint: endpoint, serviceEndpoints: [ + "bedrock-ap-northeast-1": "bedrock.ap-northeast-1.amazonaws.com", "bedrock-ap-southeast-1": "bedrock.ap-southeast-1.amazonaws.com", "bedrock-fips-us-east-1": "bedrock-fips.us-east-1.amazonaws.com", "bedrock-fips-us-west-2": "bedrock-fips.us-west-2.amazonaws.com", + "bedrock-runtime-ap-northeast-1": "bedrock-runtime.ap-northeast-1.amazonaws.com", "bedrock-runtime-ap-southeast-1": "bedrock-runtime.ap-southeast-1.amazonaws.com", "bedrock-runtime-fips-us-east-1": "bedrock-runtime-fips.us-east-1.amazonaws.com", "bedrock-runtime-fips-us-west-2": "bedrock-runtime-fips.us-west-2.amazonaws.com", diff --git a/Sources/Soto/Services/ConfigService/ConfigService_api+async.swift b/Sources/Soto/Services/ConfigService/ConfigService_api+async.swift index 4267ddb369..502e7702af 100644 --- a/Sources/Soto/Services/ConfigService/ConfigService_api+async.swift +++ b/Sources/Soto/Services/ConfigService/ConfigService_api+async.swift @@ -495,7 +495,8 @@ extension ConfigService { return try await self.client.execute(operation: "GetOrganizationCustomRulePolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns a list of ConfigurationItems for the specified resource. + /// For accurate reporting on the compliance status, you must record the AWS::Config::ResourceCompliance resource type. + /// For more information, see Selecting Which Resources Config Records. Returns a list of ConfigurationItems for the specified resource. /// The list contains details about each state of the resource /// during the specified time interval. If you specified a retention /// period to retain your ConfigurationItems between a @@ -647,7 +648,9 @@ extension ConfigService { } /// Creates a delivery channel object to deliver configuration - /// information to an Amazon S3 bucket and Amazon SNS topic. Before you can create a delivery channel, you must create a + /// information and other compliance information to an Amazon S3 bucket and Amazon SNS topic. + /// For more information, + /// see Notifications that Config Sends to an Amazon SNS topic. Before you can create a delivery channel, you must create a /// configuration recorder. You can use this action to change the Amazon S3 bucket or an /// Amazon SNS topic of the existing delivery channel. To change the /// Amazon S3 bucket or an Amazon SNS topic, call this action and @@ -1649,7 +1652,8 @@ extension ConfigService { ) } - /// Returns a list of ConfigurationItems for the specified resource. 
+ /// For accurate reporting on the compliance status, you must record the AWS::Config::ResourceCompliance resource type. + /// For more information, see Selecting Which Resources Config Records. Returns a list of ConfigurationItems for the specified resource. /// The list contains details about each state of the resource /// during the specified time interval. If you specified a retention /// period to retain your ConfigurationItems between a diff --git a/Sources/Soto/Services/ConfigService/ConfigService_api.swift b/Sources/Soto/Services/ConfigService/ConfigService_api.swift index 84a2acab40..a10ba81dc4 100644 --- a/Sources/Soto/Services/ConfigService/ConfigService_api.swift +++ b/Sources/Soto/Services/ConfigService/ConfigService_api.swift @@ -567,7 +567,8 @@ public struct ConfigService: AWSService { return self.client.execute(operation: "GetOrganizationCustomRulePolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns a list of ConfigurationItems for the specified resource. + /// For accurate reporting on the compliance status, you must record the AWS::Config::ResourceCompliance resource type. + /// For more information, see Selecting Which Resources Config Records. Returns a list of ConfigurationItems for the specified resource. /// The list contains details about each state of the resource /// during the specified time interval. If you specified a retention /// period to retain your ConfigurationItems between a @@ -719,7 +720,9 @@ public struct ConfigService: AWSService { } /// Creates a delivery channel object to deliver configuration - /// information to an Amazon S3 bucket and Amazon SNS topic. Before you can create a delivery channel, you must create a + /// information and other compliance information to an Amazon S3 bucket and Amazon SNS topic. + /// For more information, + /// see Notifications that Config Sends to an Amazon SNS topic. Before you can create a delivery channel, you must create a /// configuration recorder. You can use this action to change the Amazon S3 bucket or an /// Amazon SNS topic of the existing delivery channel. To change the /// Amazon S3 bucket or an Amazon SNS topic, call this action and @@ -2690,7 +2693,8 @@ extension ConfigService { ) } - /// Returns a list of ConfigurationItems for the specified resource. + /// For accurate reporting on the compliance status, you must record the AWS::Config::ResourceCompliance resource type. + /// For more information, see Selecting Which Resources Config Records. Returns a list of ConfigurationItems for the specified resource. /// The list contains details about each state of the resource /// during the specified time interval. 
If you specified a retention /// period to retain your ConfigurationItems between a diff --git a/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift b/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift index ec0ecf5901..6508486a27 100644 --- a/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift +++ b/Sources/Soto/Services/ConfigService/ConfigService_shapes.swift @@ -283,9 +283,11 @@ extension ConfigService { case appRunnerVpcConnector = "AWS::AppRunner::VpcConnector" case appStreamApplication = "AWS::AppStream::Application" case appStreamDirectoryConfig = "AWS::AppStream::DirectoryConfig" + case appStreamStack = "AWS::AppStream::Stack" case appSyncGraphQLApi = "AWS::AppSync::GraphQLApi" case application = "AWS::ElasticBeanstalk::Application" case applicationVersion = "AWS::ElasticBeanstalk::ApplicationVersion" + case apsRuleGroupsNamespace = "AWS::APS::RuleGroupsNamespace" case associationCompliance = "AWS::SSM::AssociationCompliance" case athenaDataCatalog = "AWS::Athena::DataCatalog" case athenaPreparedStatement = "AWS::Athena::PreparedStatement" @@ -300,6 +302,7 @@ extension ConfigService { case backupVault = "AWS::Backup::BackupVault" case batchComputeEnvironment = "AWS::Batch::ComputeEnvironment" case batchJobQueue = "AWS::Batch::JobQueue" + case batchSchedulingPolicy = "AWS::Batch::SchedulingPolicy" case bucket = "AWS::S3::Bucket" case budgetsBudgetsAction = "AWS::Budgets::BudgetsAction" case cassandraKeyspace = "AWS::Cassandra::Keyspace" @@ -314,9 +317,11 @@ extension ConfigService { case clusterSnapshot = "AWS::Redshift::ClusterSnapshot" case clusterSubnetGroup = "AWS::Redshift::ClusterSubnetGroup" case codeArtifactRepository = "AWS::CodeArtifact::Repository" + case codeBuildReportGroup = "AWS::CodeBuild::ReportGroup" case codeDeployApplication = "AWS::CodeDeploy::Application" case codeDeployDeploymentConfig = "AWS::CodeDeploy::DeploymentConfig" case codeDeployDeploymentGroup = "AWS::CodeDeploy::DeploymentGroup" + case codeGuruProfilerProfilingGroup = "AWS::CodeGuruProfiler::ProfilingGroup" case codeGuruReviewerRepositoryAssociation = "AWS::CodeGuruReviewer::RepositoryAssociation" case conformancePackCompliance = "AWS::Config::ConformancePackCompliance" case connectPhoneNumber = "AWS::Connect::PhoneNumber" @@ -425,6 +430,7 @@ extension ConfigService { case imageBuilderDistributionConfiguration = "AWS::ImageBuilder::DistributionConfiguration" case imageBuilderImagePipeline = "AWS::ImageBuilder::ImagePipeline" case imageBuilderInfrastructureConfiguration = "AWS::ImageBuilder::InfrastructureConfiguration" + case inspectorV2Filter = "AWS::InspectorV2::Filter" case instance = "AWS::EC2::Instance" case internetGateway = "AWS::EC2::InternetGateway" case ioTAccountAuditConfiguration = "AWS::IoT::AccountAuditConfiguration" @@ -439,8 +445,10 @@ extension ConfigService { case ioTEventsDetectorModel = "AWS::IoTEvents::DetectorModel" case ioTEventsInput = "AWS::IoTEvents::Input" case ioTFleetMetric = "AWS::IoT::FleetMetric" + case ioTJobTemplate = "AWS::IoT::JobTemplate" case ioTMitigationAction = "AWS::IoT::MitigationAction" case ioTPolicy = "AWS::IoT::Policy" + case ioTProvisioningTemplate = "AWS::IoT::ProvisioningTemplate" case ioTRoleAlias = "AWS::IoT::RoleAlias" case ioTScheduledAudit = "AWS::IoT::ScheduledAudit" case ioTSecurityProfile = "AWS::IoT::SecurityProfile" @@ -449,9 +457,12 @@ extension ConfigService { case ioTSiteWiseGateway = "AWS::IoTSiteWise::Gateway" case ioTSiteWisePortal = "AWS::IoTSiteWise::Portal" case ioTSiteWiseProject = 
"AWS::IoTSiteWise::Project" + case ioTTwinMakerComponentType = "AWS::IoTTwinMaker::ComponentType" case ioTTwinMakerEntity = "AWS::IoTTwinMaker::Entity" case ioTTwinMakerScene = "AWS::IoTTwinMaker::Scene" case ioTTwinMakerWorkspace = "AWS::IoTTwinMaker::Workspace" + case ioTWirelessFuotaTask = "AWS::IoTWireless::FuotaTask" + case ioTWirelessMulticastGroup = "AWS::IoTWireless::MulticastGroup" case ioTWirelessServiceProfile = "AWS::IoTWireless::ServiceProfile" case ipSetV2 = "AWS::WAFv2::IPSet" case ivsChannel = "AWS::IVS::Channel" @@ -482,10 +493,12 @@ extension ConfigService { case managedInstanceInventory = "AWS::SSM::ManagedInstanceInventory" case managedRuleSetV2 = "AWS::WAFv2::ManagedRuleSet" case mediaConnectFlowEntitlement = "AWS::MediaConnect::FlowEntitlement" + case mediaConnectFlowSource = "AWS::MediaConnect::FlowSource" case mediaConnectFlowVpcInterface = "AWS::MediaConnect::FlowVpcInterface" case mediaPackagePackagingConfiguration = "AWS::MediaPackage::PackagingConfiguration" case mediaPackagePackagingGroup = "AWS::MediaPackage::PackagingGroup" case mediaTailorPlaybackConfiguration = "AWS::MediaTailor::PlaybackConfiguration" + case mskBatchScramSecret = "AWS::MSK::BatchScramSecret" case mskCluster = "AWS::MSK::Cluster" case mskConfiguration = "AWS::MSK::Configuration" case natGateway = "AWS::EC2::NatGateway" @@ -506,6 +519,7 @@ extension ConfigService { case panoramaPackage = "AWS::Panorama::Package" case patchCompliance = "AWS::SSM::PatchCompliance" case personalizeDataset = "AWS::Personalize::Dataset" + case personalizeDatasetGroup = "AWS::Personalize::DatasetGroup" case personalizeSchema = "AWS::Personalize::Schema" case personalizeSolution = "AWS::Personalize::Solution" case pinpointApp = "AWS::Pinpoint::App" @@ -554,6 +568,8 @@ extension ConfigService { case route53ResolverFirewallDomainList = "AWS::Route53Resolver::FirewallDomainList" case route53ResolverFirewallRuleGroupAssociation = "AWS::Route53Resolver::FirewallRuleGroupAssociation" case route53ResolverResolverEndpoint = "AWS::Route53Resolver::ResolverEndpoint" + case route53ResolverResolverQueryLoggingConfig = "AWS::Route53Resolver::ResolverQueryLoggingConfig" + case route53ResolverResolverQueryLoggingConfigAssociation = "AWS::Route53Resolver::ResolverQueryLoggingConfigAssociation" case route53ResolverResolverRule = "AWS::Route53Resolver::ResolverRule" case route53ResolverResolverRuleAssociation = "AWS::Route53Resolver::ResolverRuleAssociation" case routeTable = "AWS::EC2::RouteTable" @@ -567,6 +583,7 @@ extension ConfigService { case sageMakerAppImageConfig = "AWS::SageMaker::AppImageConfig" case sageMakerCodeRepository = "AWS::SageMaker::CodeRepository" case sageMakerDomain = "AWS::SageMaker::Domain" + case sageMakerFeatureGroup = "AWS::SageMaker::FeatureGroup" case sageMakerImage = "AWS::SageMaker::Image" case sageMakerModel = "AWS::SageMaker::Model" case sageMakerNotebookInstanceLifecycleConfig = "AWS::SageMaker::NotebookInstanceLifecycleConfig" @@ -576,6 +593,7 @@ extension ConfigService { case secret = "AWS::SecretsManager::Secret" case securityGroup = "AWS::EC2::SecurityGroup" case serviceDiscoveryHttpNamespace = "AWS::ServiceDiscovery::HttpNamespace" + case serviceDiscoveryInstance = "AWS::ServiceDiscovery::Instance" case serviceDiscoveryPublicDnsNamespace = "AWS::ServiceDiscovery::PublicDnsNamespace" case serviceDiscoveryService = "AWS::ServiceDiscovery::Service" case sesConfigurationSet = "AWS::SES::ConfigurationSet" @@ -595,6 +613,7 @@ extension ConfigService { case topic = "AWS::SNS::Topic" case trail = 
"AWS::CloudTrail::Trail" case transferAgreement = "AWS::Transfer::Agreement" + case transferCertificate = "AWS::Transfer::Certificate" case transferConnector = "AWS::Transfer::Connector" case transferWorkflow = "AWS::Transfer::Workflow" case transitGateway = "AWS::EC2::TransitGateway" @@ -1014,7 +1033,7 @@ extension ConfigService { public let configuration: String? /// The time when the configuration recording was initiated. public let configurationItemCaptureTime: Date? - /// The configuration item status. The valid values are: OK – The resource configuration has been updated ResourceDiscovered – The resource was newly discovered ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type ResourceDeleted – The resource was deleted ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type The CIs do not incur any cost. + /// The configuration item status. The valid values are: OK – The resource configuration has been updated ResourceDiscovered – The resource was newly discovered ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type ResourceDeleted – The resource was deleted ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type public let configurationItemStatus: ConfigurationItemStatus? /// An identifier that indicates the ordering of the configuration /// items of a resource. @@ -1670,7 +1689,7 @@ extension ConfigService { /// configuration items that are associated with the same /// resource. public let configurationItemMD5Hash: String? - /// The configuration item status. The valid values are: OK – The resource configuration has been updated ResourceDiscovered – The resource was newly discovered ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type ResourceDeleted – The resource was deleted ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type The CIs do not incur any cost. + /// The configuration item status. The valid values are: OK – The resource configuration has been updated ResourceDiscovered – The resource was newly discovered ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type ResourceDeleted – The resource was deleted ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type public let configurationItemStatus: ConfigurationItemStatus? /// An identifier that indicates the ordering of the configuration /// items of a resource. @@ -4995,12 +5014,12 @@ extension ConfigService { /// default, the results are listed in reverse chronological /// order. public let chronologicalOrder: ChronologicalOrder? - /// The time stamp that indicates an earlier time. If not + /// The chronologically earliest time in the time range for which the history requested. 
If not /// specified, the action returns paginated results that contain /// configuration items that start when the first configuration item was /// recorded. public let earlierTime: Date? - /// The time stamp that indicates a later time. If not specified, + /// The chronologically latest time in the time range for which the history requested. If not specified, /// current time is taken. public let laterTime: Date? /// The maximum number of configuration items returned on each @@ -6200,6 +6219,7 @@ extension ConfigService { /// A string containing the full conformance pack template body. The structure containing the template body has a minimum length of 1 byte and a maximum length of 51,200 bytes. You can use a YAML template with two resource types: Config rule (AWS::Config::ConfigRule) and remediation action (AWS::Config::RemediationConfiguration). public let templateBody: String? /// The location of the file containing the template body (s3://bucketname/prefix). The uri must point to a conformance pack template (max size: 300 KB) that is located in an Amazon S3 bucket in the same Region as the conformance pack. You must have access to read Amazon S3 bucket. + /// In addition, in order to ensure a successful deployment, the template object must not be in an archived storage class if this parameter is passed. public let templateS3Uri: String? /// An object of type TemplateSSMDocumentDetails, which contains the name or the Amazon Resource Name (ARN) of the Amazon Web Services Systems Manager document (SSM document) and the version of the SSM document that is used to create a conformance pack. public let templateSSMDocumentDetails: TemplateSSMDocumentDetails? @@ -6432,6 +6452,7 @@ extension ConfigService { public let templateBody: String? /// Location of file containing the template body. The uri must point to the conformance pack template /// (max size: 300 KB). You must have access to read Amazon S3 bucket. + /// In addition, in order to ensure a successful deployment, the template object must not be in an archived storage class if this parameter is passed. public let templateS3Uri: String? public init(conformancePackInputParameters: [ConformancePackInputParameter]? = nil, deliveryS3Bucket: String? = nil, deliveryS3KeyPrefix: String? = nil, excludedAccounts: [String]? = nil, organizationConformancePackName: String, templateBody: String? = nil, templateS3Uri: String? = nil) { @@ -6705,27 +6726,33 @@ extension ConfigService { } public struct RecordingGroup: AWSEncodableShape & AWSDecodableShape { - /// Specifies whether Config records configuration changes for all supported regional resource types. If you set this field to true, when Config - /// adds support for a new type of regional resource, Config starts recording resources of that type automatically. If you set this field to true, - /// you cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes. + /// Specifies whether Config records configuration changes for all supported regionally recorded resource types. If you set this field to true, when Config + /// adds support for a new regionally recorded resource type, Config starts recording resources of that type automatically. If you set this field to true, + /// you cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes. 
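Stepping back to the GetResourceConfigHistory hunk above, the earlierTime/laterTime pair is easiest to see in a call. The sketch below is illustrative only: the GetResourceConfigHistoryRequest shape, the configurationItems field on the response, and the AWSClient setup follow the usual Soto codegen pattern rather than anything shown verbatim in this diff, and the resource ID is a placeholder.

```swift
import Foundation
import SotoConfigService

// Sketch: fetch the last 7 days of configuration history for one resource.
// Omitting earlierTime/laterTime falls back to the documented defaults
// (first recorded configuration item / current time).
let awsClient = AWSClient(httpClientProvider: .createNew)
let config = ConfigService(client: awsClient, region: .useast1)

let request = ConfigService.GetResourceConfigHistoryRequest(
    chronologicalOrder: .reverse,
    earlierTime: Date().addingTimeInterval(-7 * 24 * 3600),
    laterTime: Date(),
    resourceId: "sg-0123456789abcdef0",   // placeholder resource ID
    resourceType: .securityGroup
)
let history = try await config.getResourceConfigHistory(request)
for item in history.configurationItems ?? [] {
    print(item.configurationItemCaptureTime ?? Date(),
          item.configurationItemStatus?.rawValue ?? "UNKNOWN")
}
try awsClient.syncShutdown()
```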
Region Availability Check Resource Coverage by Region Availability + /// to see if a resource type is supported in the Amazon Web Services Region where you set up Config. public let allSupported: Bool? /// An object that specifies how Config excludes resource types from being recorded by the configuration recorder. To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES. public let exclusionByResourceTypes: ExclusionByResourceTypes? - /// Specifies whether Config records configuration changes for all supported global resources. Before you set this field to true, + /// A legacy field which only applies to the globally recorded IAM resource types: IAM users, groups, roles, and customer managed policies. + /// If you select this option, these resource types will be recorded in all enabled Config regions where Config was available before February 2022. + /// This list does not include the following Regions: Asia Pacific (Hyderabad) Asia Pacific (Melbourne) Europe (Spain) Europe (Zurich) Israel (Tel Aviv) Middle East (UAE) Aurora global clusters are automatically globally recorded The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is not set to true. + /// includeGlobalResourceTypes is a legacy field which only applies to IAM users, groups, roles, and customer managed policies. + /// If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies: Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or Record specific resource types (INCLUSION_BY_RESOURCE_TYPES). For more information, see Selecting Which Resources are Recorded in the Config developer guide. Required and optional fields Before you set this field to true, /// set the allSupported field of RecordingGroup to - /// true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES. If you set this field to true, when Config - /// adds support for a new type of global resource in the Region where you set up the configuration recorder, Config starts recording - /// resources of that type automatically. If you set this field to false but list global resource types in the resourceTypes field of RecordingGroup, - /// Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false. If you do not want to record configuration changes to global resource types, make sure to not list them in the resourceTypes field + /// true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES. Overriding fields If you set this field to false but list globally recorded IAM resource types in the resourceTypes field of RecordingGroup, + /// Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false. If you do not want to record configuration changes to the globally recorded IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field /// in addition to setting the includeGlobalResourceTypes field to false. public let includeGlobalResourceTypes: Bool? - /// An object that specifies the recording strategy for the configuration recorder. 
If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup. If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types - /// except the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes. The recordingStrategy field is optional when you set the + /// An object that specifies the recording strategy for the configuration recorder. If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regionally recorded resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new regionally recorded resource type, Config automatically starts recording resources of that type. If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup. If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types + /// except the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes. Required and optional fields The recordingStrategy field is optional when you set the /// allSupported field of RecordingGroup to true. The recordingStrategy field is optional when you list resource types in the - /// resourceTypes field of RecordingGroup. The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically - /// recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes. By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, - /// when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, - /// Config starts recording resources of that type automatically. + /// resourceTypes field of RecordingGroup. The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. Overriding fields If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. 
For example, even if you set includeGlobalResourceTypes to false, globally recorded IAM resource types will still be automatically + /// recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes. Global resources types and the resource exclusion recording strategy By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, + /// when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, + /// Config starts recording resources of that type automatically. In addition, unless specifically listed as exclusions, + /// AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled. + /// IAM users, groups, roles, and customer managed policies will be recorded automatically in all enabled Config Regions where Config was available before February 2022. + /// This list does not include the following Regions: Asia Pacific (Hyderabad) Asia Pacific (Melbourne) Europe (Spain) Europe (Zurich) Israel (Tel Aviv) Middle East (UAE) public let recordingStrategy: RecordingStrategy? /// A comma-separated list that specifies which resource types Config /// records. Optionally, you can set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES. To record all configuration changes, @@ -6760,18 +6787,23 @@ extension ConfigService { } public struct RecordingStrategy: AWSEncodableShape & AWSDecodableShape { - /// The recording strategy for the configuration recorder. If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. For a list of supported resource types, + /// The recording strategy for the configuration recorder. If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regionally recorded resource types. + /// You also must set the allSupported field of RecordingGroup to true. + /// When Config adds support for a new regionally recorded resource type, Config automatically starts recording resources of that type. For a list of supported resource types, /// see Supported Resource Types in the Config developer guide. If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records /// configuration changes for only the resource types that you specify in the /// resourceTypes field of RecordingGroup. If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records /// configuration changes for all supported resource types, except the resource - /// types that you specify as exemptions to exclude from being recorded in the - /// resourceTypes field of ExclusionByResourceTypes. The recordingStrategy field is optional when you set the + /// types that you specify to exclude from being recorded in the + /// resourceTypes field of ExclusionByResourceTypes. Required and optional fields The recordingStrategy field is optional when you set the /// allSupported field of RecordingGroup to true. The recordingStrategy field is optional when you list resource types in the - /// resourceTypes field of RecordingGroup. 
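To make the EXCLUSION_BY_RESOURCE_TYPES wording above concrete, here is a minimal sketch of building a RecordingGroup with the exclusion strategy. It assumes the generated ExclusionByResourceTypes(resourceTypes:) initializer, the RecordingStrategyType.exclusionByResourceTypes case, and the existing ConfigurationRecorder/PutConfigurationRecorderRequest shapes follow the usual Soto naming; only RecordingGroup and RecordingStrategy(useOnly:) appear verbatim in this hunk.

```swift
import SotoConfigService

// Sketch: record everything except two resource types. Per the notes above,
// newly supported types, AWS::RDS::GlobalCluster, and the legacy IAM global
// types keep being recorded unless they are also listed as exclusions.
let awsClient = AWSClient(httpClientProvider: .createNew)
let config = ConfigService(client: awsClient, region: .useast1)

let recordingGroup = ConfigService.RecordingGroup(
    allSupported: false,
    exclusionByResourceTypes: .init(resourceTypes: [.transferCertificate, .mskBatchScramSecret]),
    recordingStrategy: .init(useOnly: .exclusionByResourceTypes)
)
let recorder = ConfigService.ConfigurationRecorder(
    name: "default",
    recordingGroup: recordingGroup,
    roleARN: "arn:aws:iam::123456789012:role/config-recorder"   // placeholder role
)
try await config.putConfigurationRecorder(.init(configurationRecorder: recorder))
try awsClient.syncShutdown()
```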
The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically - /// recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes. By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, + /// resourceTypes field of RecordingGroup. The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes. Overriding fields If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request. For example, even if you set includeGlobalResourceTypes to false, globally recorded IAM resource types will still be automatically + /// recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes. Global resource types and the exclusion recording strategy By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, /// when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, - /// Config starts recording resources of that type automatically. + /// Config starts recording resources of that type automatically. In addition, unless specifically listed as exclusions, + /// AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled. + /// IAM users, groups, roles, and customer managed policies will be recorded automatically in all enabled Config Regions where Config was available before February 2022. + /// This list does not include the following Regions: Asia Pacific (Hyderabad) Asia Pacific (Melbourne) Europe (Spain) Europe (Zurich) Israel (Tel Aviv) Middle East (UAE) public let useOnly: RecordingStrategyType? public init(useOnly: RecordingStrategyType? = nil) { diff --git a/Sources/Soto/Services/ControlTower/ControlTower_api+async.swift b/Sources/Soto/Services/ControlTower/ControlTower_api+async.swift index 4f02320510..e91f06d36c 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_api+async.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_api+async.swift @@ -21,22 +21,27 @@ import SotoCore extension ControlTower { // MARK: Async API Calls - /// This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. + /// This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the AWS Control Tower User Guide . public func disableControl(_ input: DisableControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> DisableControlOutput { return try await self.client.execute(operation: "DisableControl", path: "/disable-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This API call activates a control. It starts an asynchronous operation that creates AWS resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. + /// This API call activates a control. It starts an asynchronous operation that creates AWS resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the AWS Control Tower User Guide public func enableControl(_ input: EnableControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> EnableControlOutput { return try await self.client.execute(operation: "EnableControl", path: "/enable-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. + /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the AWS Control Tower User Guide public func getControlOperation(_ input: GetControlOperationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetControlOperationOutput { return try await self.client.execute(operation: "GetControlOperation", path: "/get-control-operation", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. + /// Provides details about the enabled control. For usage examples, see the AWS Control Tower User Guide . Returned values TargetRegions: Shows target AWS Regions where the enabled control is available to be deployed. StatusSummary: Provides a detailed summary of the deployment status. DriftSummary: Provides a detailed summary of the drifted status. + public func getEnabledControl(_ input: GetEnabledControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetEnabledControlOutput { + return try await self.client.execute(operation: "GetEnabledControl", path: "/get-enabled-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the AWS Control Tower User Guide public func listEnabledControls(_ input: ListEnabledControlsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListEnabledControlsOutput { return try await self.client.execute(operation: "ListEnabledControls", path: "/list-enabled-controls", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -46,7 +51,7 @@ extension ControlTower { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension ControlTower { - /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. + /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the AWS Control Tower User Guide /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/ControlTower/ControlTower_api.swift b/Sources/Soto/Services/ControlTower/ControlTower_api.swift index 8ab4d0890f..13020d356e 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_api.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_api.swift @@ -19,8 +19,7 @@ /// Service object for interacting with AWS ControlTower service. /// -/// These interfaces allow you to apply the AWS library of pre-defined controls to your -/// organizational units, programmatically. In this context, controls are the same as AWS Control Tower guardrails. To call these APIs, you'll need to know: the ControlARN for the control--that is, the guardrail--you are targeting, and the ARN associated with the target organizational unit (OU). To get the ControlARN for your AWS Control Tower guardrail: The ControlARN contains the control name which is specified in each guardrail. For a list of control names for Strongly recommended and Elective guardrails, see Resource identifiers for APIs and guardrails in the Automating tasks section of the AWS Control Tower User Guide. Remember that Mandatory guardrails cannot be added or removed. ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED To get the ARN for an OU: In the AWS Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} Details and examples List of resource identifiers for APIs and guardrails Guardrail API examples (CLI) Enable controls with AWS CloudFormation Creating AWS Control Tower resources with AWS CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests AWS Control Tower supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the AWS Control Tower service received, who made the request and when, and so on. For more about AWS Control Tower and its support for CloudTrail, see Logging AWS Control Tower Actions with AWS CloudTrail in the AWS Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide. +/// These interfaces allow you to apply the AWS library of pre-defined controls to your organizational units, programmatically. In AWS Control Tower, the terms "control" and "guardrail" are synonyms. . 
To call these APIs, you'll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. To get the controlIdentifier for your AWS Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. The controlIdentifier is unique in each AWS Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata in the AWS Control Tower User Guide. A quick-reference list of control identifers for the AWS Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and guardrails in the Controls reference guide section of the AWS Control Tower User Guide. Remember that Mandatory controls cannot be added or removed. ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the AWS Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} Details and examples Control API input and output examples with CLI Enable controls with CloudFormation Control metadata tables List of identifiers for legacy controls Controls reference guide Controls library groupings Creating AWS Control Tower resources with AWS CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests AWS Control Tower supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the AWS Control Tower service received, who made the request and when, and so on. For more about AWS Control Tower and its support for CloudTrail, see Logging AWS Control Tower Actions with AWS CloudTrail in the AWS Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide. public struct ControlTower: AWSService { // MARK: Member variables @@ -73,22 +72,27 @@ public struct ControlTower: AWSService { // MARK: API Calls - /// This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. + /// This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the AWS Control Tower User Guide . public func disableControl(_ input: DisableControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DisableControl", path: "/disable-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This API call activates a control. 
It starts an asynchronous operation that creates AWS resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. + /// This API call activates a control. It starts an asynchronous operation that creates AWS resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the AWS Control Tower User Guide public func enableControl(_ input: EnableControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "EnableControl", path: "/enable-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. + /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the AWS Control Tower User Guide public func getControlOperation(_ input: GetControlOperationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "GetControlOperation", path: "/get-control-operation", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. + /// Provides details about the enabled control. For usage examples, see the AWS Control Tower User Guide . Returned values TargetRegions: Shows target AWS Regions where the enabled control is available to be deployed. StatusSummary: Provides a detailed summary of the deployment status. DriftSummary: Provides a detailed summary of the drifted status. + public func getEnabledControl(_ input: GetEnabledControlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetEnabledControl", path: "/get-enabled-control", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the AWS Control Tower User Guide public func listEnabledControls(_ input: ListEnabledControlsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListEnabledControls", path: "/list-enabled-controls", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -106,7 +110,7 @@ extension ControlTower { // MARK: Paginators extension ControlTower { - /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. + /// Lists the controls enabled by AWS Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the AWS Control Tower User Guide /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. 
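Since the ControlTower changes above revolve around the controlIdentifier/targetIdentifier ARNs and the new GetEnabledControl operation, a short end-to-end sketch may help. The ARNs are placeholders, and the enabledControls property on ListEnabledControlsOutput is assumed from the usual Soto naming; the other shapes used here (EnableControlInput, ListEnabledControlsInput, GetEnabledControlInput, EnabledControlDetails) are the ones touched in this diff.

```swift
import SotoControlTower

// Sketch: enable a control on an OU, then inspect what is enabled there.
let awsClient = AWSClient(httpClientProvider: .createNew)
let controlTower = ControlTower(client: awsClient, region: .uswest2)

let ouArn = "arn:aws:organizations::111122223333:ou/o-exampleorgid/ou-examplerootid-exampleouid"
let controlArn = "arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED"

_ = try await controlTower.enableControl(.init(controlIdentifier: controlArn, targetIdentifier: ouArn))

let listed = try await controlTower.listEnabledControls(.init(targetIdentifier: ouArn))
if let enabledArn = listed.enabledControls?.first?.arn {
    let output = try await controlTower.getEnabledControl(.init(enabledControlIdentifier: enabledArn))
    print("deployment status:", output.enabledControlDetails.statusSummary?.status?.rawValue ?? "UNKNOWN")
}
try awsClient.syncShutdown()
```

When an OU has more enabled controls than one page returns, the paginator support added at the end of ControlTower_api.swift avoids handling nextToken by hand.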
diff --git a/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift b/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift index 34e422c31e..f6c8d18f63 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift @@ -39,6 +39,21 @@ extension ControlTower { public var description: String { return self.rawValue } } + public enum DriftStatus: String, CustomStringConvertible, Codable, Sendable { + case drifted = "DRIFTED" + case inSync = "IN_SYNC" + case notChecking = "NOT_CHECKING" + case unknown = "UNKNOWN" + public var description: String { return self.rawValue } + } + + public enum EnablementStatus: String, CustomStringConvertible, Codable, Sendable { + case failed = "FAILED" + case succeeded = "SUCCEEDED" + case underChange = "UNDER_CHANGE" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct ControlOperation: AWSDecodableShape { @@ -73,9 +88,9 @@ extension ControlTower { } public struct DisableControlInput: AWSEncodableShape { - /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. + /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page. public let controlIdentifier: String - /// The ARN of the organizational unit. + /// The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page. public let targetIdentifier: String public init(controlIdentifier: String, targetIdentifier: String) { @@ -111,10 +126,23 @@ extension ControlTower { } } + public struct DriftStatusSummary: AWSDecodableShape { + /// The drift status of the enabled control. Valid values: DRIFTED: The enabledControl deployed in this configuration doesn’t match the configuration that AWS Control Tower expected. IN_SYNC: The enabledControl deployed in this configuration matches the configuration that AWS Control Tower expected. NOT_CHECKING: AWS Control Tower does not check drift for this enabled control. Drift is not supported for the control type. UNKNOWN: AWS Control Tower is not able to check the drift status for the enabled control. + public let driftStatus: DriftStatus? + + public init(driftStatus: DriftStatus? = nil) { + self.driftStatus = driftStatus + } + + private enum CodingKeys: String, CodingKey { + case driftStatus = "driftStatus" + } + } + public struct EnableControlInput: AWSEncodableShape { - /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. + /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page. public let controlIdentifier: String - /// The ARN of the organizational unit. + /// The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page. public let targetIdentifier: String public init(controlIdentifier: String, targetIdentifier: String) { @@ -150,16 +178,81 @@ extension ControlTower { } } - public struct EnabledControlSummary: AWSDecodableShape { - /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. 
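The DriftStatus and EnablementStatus enums introduced above surface on the EnabledControlDetails shape defined just below. A small helper such as the following (a sketch, not part of this diff) shows how the four drift values are meant to be read:

```swift
import SotoControlTower

// Sketch: translate the drift status returned by GetEnabledControl into a message.
func describeDrift(_ details: ControlTower.EnabledControlDetails) -> String {
    guard let drift = details.driftStatusSummary?.driftStatus else {
        return "No drift information was returned."
    }
    switch drift {
    case .inSync:
        return "Deployed configuration matches what AWS Control Tower expects."
    case .drifted:
        return "Deployed configuration no longer matches what AWS Control Tower expects."
    case .notChecking:
        return "Drift is not checked for this control type."
    case .unknown:
        return "AWS Control Tower could not determine the drift status."
    }
}
```

For example, print(describeDrift(output.enabledControlDetails)) could follow the GetEnabledControl call in the previous sketch.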
+ public struct EnabledControlDetails: AWSDecodableShape { + /// The ARN of the enabled control. + public let arn: String? + /// The control identifier of the enabled control. For information on how to find the controlIdentifier, see the overview page. public let controlIdentifier: String? + /// The drift status of the enabled control. + public let driftStatusSummary: DriftStatusSummary? + /// The deployment summary of the enabled control. + public let statusSummary: EnablementStatusSummary? + /// The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page. + public let targetIdentifier: String? + /// Target AWS Regions for the enabled control. + public let targetRegions: [Region]? + + public init(arn: String? = nil, controlIdentifier: String? = nil, driftStatusSummary: DriftStatusSummary? = nil, statusSummary: EnablementStatusSummary? = nil, targetIdentifier: String? = nil, targetRegions: [Region]? = nil) { + self.arn = arn + self.controlIdentifier = controlIdentifier + self.driftStatusSummary = driftStatusSummary + self.statusSummary = statusSummary + self.targetIdentifier = targetIdentifier + self.targetRegions = targetRegions + } - public init(controlIdentifier: String? = nil) { + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case controlIdentifier = "controlIdentifier" + case driftStatusSummary = "driftStatusSummary" + case statusSummary = "statusSummary" + case targetIdentifier = "targetIdentifier" + case targetRegions = "targetRegions" + } + } + + public struct EnabledControlSummary: AWSDecodableShape { + /// The ARN of the enabled control. + public let arn: String? + /// The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page. + public let controlIdentifier: String? + /// The drift status of the enabled control. + public let driftStatusSummary: DriftStatusSummary? + public let statusSummary: EnablementStatusSummary? + /// The ARN of the organizational unit. + public let targetIdentifier: String? + + public init(arn: String? = nil, controlIdentifier: String? = nil, driftStatusSummary: DriftStatusSummary? = nil, statusSummary: EnablementStatusSummary? = nil, targetIdentifier: String? = nil) { + self.arn = arn self.controlIdentifier = controlIdentifier + self.driftStatusSummary = driftStatusSummary + self.statusSummary = statusSummary + self.targetIdentifier = targetIdentifier } private enum CodingKeys: String, CodingKey { + case arn = "arn" case controlIdentifier = "controlIdentifier" + case driftStatusSummary = "driftStatusSummary" + case statusSummary = "statusSummary" + case targetIdentifier = "targetIdentifier" + } + } + + public struct EnablementStatusSummary: AWSDecodableShape { + /// The last operation identifier for the enabled control. + public let lastOperationIdentifier: String? + /// The deployment status of the enabled control. Valid values: SUCCEEDED: The enabledControl configuration was deployed successfully. UNDER_CHANGE: The enabledControl configuration is changing. FAILED: The enabledControl configuration failed to deploy. + public let status: EnablementStatus? + + public init(lastOperationIdentifier: String? = nil, status: EnablementStatus? 
= nil) { + self.lastOperationIdentifier = lastOperationIdentifier + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case lastOperationIdentifier = "lastOperationIdentifier" + case status = "status" } } @@ -183,6 +276,7 @@ extension ControlTower { } public struct GetControlOperationOutput: AWSDecodableShape { + /// An operation performed by the control. public let controlOperation: ControlOperation public init(controlOperation: ControlOperation) { @@ -194,12 +288,44 @@ extension ControlTower { } } + public struct GetEnabledControlInput: AWSEncodableShape { + /// The ARN of the enabled control. + public let enabledControlIdentifier: String + + public init(enabledControlIdentifier: String) { + self.enabledControlIdentifier = enabledControlIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.enabledControlIdentifier, name: "enabledControlIdentifier", parent: name, max: 2048) + try self.validate(self.enabledControlIdentifier, name: "enabledControlIdentifier", parent: name, min: 20) + try self.validate(self.enabledControlIdentifier, name: "enabledControlIdentifier", parent: name, pattern: "^arn:aws[0-9a-zA-Z_\\-:\\/]+$") + } + + private enum CodingKeys: String, CodingKey { + case enabledControlIdentifier = "enabledControlIdentifier" + } + } + + public struct GetEnabledControlOutput: AWSDecodableShape { + /// Information about the enabled control. + public let enabledControlDetails: EnabledControlDetails + + public init(enabledControlDetails: EnabledControlDetails) { + self.enabledControlDetails = enabledControlDetails + } + + private enum CodingKeys: String, CodingKey { + case enabledControlDetails = "enabledControlDetails" + } + } + public struct ListEnabledControlsInput: AWSEncodableShape { /// How many results to return per API call. public let maxResults: Int? /// The token to continue the list from a previous API call with the same parameters. public let nextToken: String? - /// The ARN of the organizational unit. + /// The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page. public let targetIdentifier: String public init(maxResults: Int? = nil, nextToken: String? = nil, targetIdentifier: String) { @@ -209,7 +335,7 @@ extension ControlTower { } public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 200) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.targetIdentifier, name: "targetIdentifier", parent: name, max: 2048) try self.validate(self.targetIdentifier, name: "targetIdentifier", parent: name, min: 20) @@ -239,6 +365,19 @@ extension ControlTower { case nextToken = "nextToken" } } + + public struct Region: AWSDecodableShape { + /// The AWS Region name. + public let name: String? + + public init(name: String? 
= nil) { + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/DataZone/DataZone_api+async.swift b/Sources/Soto/Services/DataZone/DataZone_api+async.swift new file mode 100644 index 0000000000..eddc7841c8 --- /dev/null +++ b/Sources/Soto/Services/DataZone/DataZone_api+async.swift @@ -0,0 +1,960 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +import SotoCore + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension DataZone { + // MARK: Async API Calls + + /// Accepts automatically generated business-friendly metadata for your Amazon DataZone assets. + public func acceptPredictions(_ input: AcceptPredictionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AcceptPredictionsOutput { + return try await self.client.execute(operation: "AcceptPredictions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/accept-predictions", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Accepts a subscription request to a specific asset. + public func acceptSubscriptionRequest(_ input: AcceptSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AcceptSubscriptionRequestOutput { + return try await self.client.execute(operation: "AcceptSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/accept", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Cancels the subscription to the specified asset. + public func cancelSubscription(_ input: CancelSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CancelSubscriptionOutput { + return try await self.client.execute(operation: "CancelSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/cancel", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an asset in Amazon DataZone catalog. + public func createAsset(_ input: CreateAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateAssetOutput { + return try await self.client.execute(operation: "CreateAsset", path: "/v2/domains/{domainIdentifier}/assets", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a revision of the asset. + public func createAssetRevision(_ input: CreateAssetRevisionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> CreateAssetRevisionOutput { + return try await self.client.execute(operation: "CreateAssetRevision", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a custom asset type. + public func createAssetType(_ input: CreateAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateAssetTypeOutput { + return try await self.client.execute(operation: "CreateAssetType", path: "/v2/domains/{domainIdentifier}/asset-types", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone data source. + public func createDataSource(_ input: CreateDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateDataSourceOutput { + return try await self.client.execute(operation: "CreateDataSource", path: "/v2/domains/{domainIdentifier}/data-sources", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone domain. + public func createDomain(_ input: CreateDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateDomainOutput { + return try await self.client.execute(operation: "CreateDomain", path: "/v2/domains", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Create an Amazon DataZone environment. + public func createEnvironment(_ input: CreateEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateEnvironmentOutput { + return try await self.client.execute(operation: "CreateEnvironment", path: "/v2/domains/{domainIdentifier}/environments", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone environment profile. + public func createEnvironmentProfile(_ input: CreateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateEnvironmentProfileOutput { + return try await self.client.execute(operation: "CreateEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a metadata form type. + public func createFormType(_ input: CreateFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateFormTypeOutput { + return try await self.client.execute(operation: "CreateFormType", path: "/v2/domains/{domainIdentifier}/form-types", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone business glossary. + public func createGlossary(_ input: CreateGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateGlossaryOutput { + return try await self.client.execute(operation: "CreateGlossary", path: "/v2/domains/{domainIdentifier}/glossaries", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a business glossary term. + public func createGlossaryTerm(_ input: CreateGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> CreateGlossaryTermOutput { + return try await self.client.execute(operation: "CreateGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a group profile in Amazon DataZone. + public func createGroupProfile(_ input: CreateGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateGroupProfileOutput { + return try await self.client.execute(operation: "CreateGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func createListingChangeSet(_ input: CreateListingChangeSetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateListingChangeSetOutput { + return try await self.client.execute(operation: "CreateListingChangeSet", path: "/v2/domains/{domainIdentifier}/listings/change-set", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone project. + public func createProject(_ input: CreateProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateProjectOutput { + return try await self.client.execute(operation: "CreateProject", path: "/v2/domains/{domainIdentifier}/projects", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a project membership in Amazon DataZone. + public func createProjectMembership(_ input: CreateProjectMembershipInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateProjectMembershipOutput { + return try await self.client.execute(operation: "CreateProjectMembership", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription grant in Amazon DataZone. + public func createSubscriptionGrant(_ input: CreateSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateSubscriptionGrantOutput { + return try await self.client.execute(operation: "CreateSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription request in Amazon DataZone. + public func createSubscriptionRequest(_ input: CreateSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateSubscriptionRequestOutput { + return try await self.client.execute(operation: "CreateSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription target in Amazon DataZone. + public func createSubscriptionTarget(_ input: CreateSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) async throws -> CreateSubscriptionTargetOutput { + return try await self.client.execute(operation: "CreateSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a user profile in Amazon DataZone. + public func createUserProfile(_ input: CreateUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateUserProfileOutput { + return try await self.client.execute(operation: "CreateUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an asset in Amazon DataZone. + public func deleteAsset(_ input: DeleteAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteAssetOutput { + return try await self.client.execute(operation: "DeleteAsset", path: "/v2/domains/{domainIdentifier}/assets/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an asset type in Amazon DataZone. + public func deleteAssetType(_ input: DeleteAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteAssetTypeOutput { + return try await self.client.execute(operation: "DeleteAssetType", path: "/v2/domains/{domainIdentifier}/asset-types/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a data source in Amazon DataZone. + public func deleteDataSource(_ input: DeleteDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteDataSourceOutput { + return try await self.client.execute(operation: "DeleteDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon DataZone domain. + public func deleteDomain(_ input: DeleteDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteDomainOutput { + return try await self.client.execute(operation: "DeleteDomain", path: "/v2/domains/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an environment in Amazon DataZone. + public func deleteEnvironment(_ input: DeleteEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { + return try await self.client.execute(operation: "DeleteEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes the blueprint configuration in Amazon DataZone. + public func deleteEnvironmentBlueprintConfiguration(_ input: DeleteEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) async throws -> DeleteEnvironmentBlueprintConfigurationOutput { + return try await self.client.execute(operation: "DeleteEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an environment profile in Amazon DataZone. + public func deleteEnvironmentProfile(_ input: DeleteEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { + return try await self.client.execute(operation: "DeleteEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a metadata form type in Amazon DataZone. + public func deleteFormType(_ input: DeleteFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteFormTypeOutput { + return try await self.client.execute(operation: "DeleteFormType", path: "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a business glossary in Amazon DataZone. + public func deleteGlossary(_ input: DeleteGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteGlossaryOutput { + return try await self.client.execute(operation: "DeleteGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a business glossary term in Amazon DataZone. + public func deleteGlossaryTerm(_ input: DeleteGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteGlossaryTermOutput { + return try await self.client.execute(operation: "DeleteGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func deleteListing(_ input: DeleteListingInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteListingOutput { + return try await self.client.execute(operation: "DeleteListing", path: "/v2/domains/{domainIdentifier}/listings/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a project in Amazon DataZone. + public func deleteProject(_ input: DeleteProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteProjectOutput { + return try await self.client.execute(operation: "DeleteProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes project membership in Amazon DataZone. + public func deleteProjectMembership(_ input: DeleteProjectMembershipInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) async throws -> DeleteProjectMembershipOutput { + return try await self.client.execute(operation: "DeleteProjectMembership", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/deleteMembership", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes and subscription grant in Amazon DataZone. + public func deleteSubscriptionGrant(_ input: DeleteSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteSubscriptionGrantOutput { + return try await self.client.execute(operation: "DeleteSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a subscription request in Amazon DataZone. + public func deleteSubscriptionRequest(_ input: DeleteSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { + return try await self.client.execute(operation: "DeleteSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a subscription target in Amazon DataZone. + public func deleteSubscriptionTarget(_ input: DeleteSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { + return try await self.client.execute(operation: "DeleteSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone asset. + public func getAsset(_ input: GetAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetAssetOutput { + return try await self.client.execute(operation: "GetAsset", path: "/v2/domains/{domainIdentifier}/assets/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone asset type. + public func getAssetType(_ input: GetAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetAssetTypeOutput { + return try await self.client.execute(operation: "GetAssetType", path: "/v2/domains/{domainIdentifier}/asset-types/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone data source. + public func getDataSource(_ input: GetDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetDataSourceOutput { + return try await self.client.execute(operation: "GetDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone data source run. + public func getDataSourceRun(_ input: GetDataSourceRunInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> GetDataSourceRunOutput { + return try await self.client.execute(operation: "GetDataSourceRun", path: "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone domain. + public func getDomain(_ input: GetDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetDomainOutput { + return try await self.client.execute(operation: "GetDomain", path: "/v2/domains/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone environment. + public func getEnvironment(_ input: GetEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetEnvironmentOutput { + return try await self.client.execute(operation: "GetEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone blueprint. + public func getEnvironmentBlueprint(_ input: GetEnvironmentBlueprintInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetEnvironmentBlueprintOutput { + return try await self.client.execute(operation: "GetEnvironmentBlueprint", path: "/v2/domains/{domainIdentifier}/environment-blueprints/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the blueprint configuration in Amazon DataZone. + public func getEnvironmentBlueprintConfiguration(_ input: GetEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetEnvironmentBlueprintConfigurationOutput { + return try await self.client.execute(operation: "GetEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an evinronment profile in Amazon DataZone. + public func getEnvironmentProfile(_ input: GetEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetEnvironmentProfileOutput { + return try await self.client.execute(operation: "GetEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a metadata form type in Amazon DataZone. + public func getFormType(_ input: GetFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetFormTypeOutput { + return try await self.client.execute(operation: "GetFormType", path: "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a business glossary in Amazon DataZone. + public func getGlossary(_ input: GetGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> GetGlossaryOutput { + return try await self.client.execute(operation: "GetGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a business glossary term in Amazon DataZone. + public func getGlossaryTerm(_ input: GetGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetGlossaryTermOutput { + return try await self.client.execute(operation: "GetGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a group profile in Amazon DataZone. + public func getGroupProfile(_ input: GetGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetGroupProfileOutput { + return try await self.client.execute(operation: "GetGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the data portal URL for the specified Amazon DataZone domain. + public func getIamPortalLoginUrl(_ input: GetIamPortalLoginUrlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetIamPortalLoginUrlOutput { + return try await self.client.execute(operation: "GetIamPortalLoginUrl", path: "/v2/domains/{domainIdentifier}/get-portal-login-url", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func getListing(_ input: GetListingInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetListingOutput { + return try await self.client.execute(operation: "GetListing", path: "/v2/domains/{domainIdentifier}/listings/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a project in Amazon DataZone. + public func getProject(_ input: GetProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetProjectOutput { + return try await self.client.execute(operation: "GetProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a subscription in Amazon DataZone. + public func getSubscription(_ input: GetSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetSubscriptionOutput { + return try await self.client.execute(operation: "GetSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the subscription grant in Amazon DataZone. + public func getSubscriptionGrant(_ input: GetSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetSubscriptionGrantOutput { + return try await self.client.execute(operation: "GetSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the details of the specified subscription request. 
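The async calls above all follow one pattern: build the generated input shape carrying the route's path parameters, then pass it to the matching method on a configured DataZone service object. A minimal sketch of the read path is below; the GetDomainInput/GetAssetInput memberwise initializers and the placeholder identifiers are assumptions for illustration, since the shape definitions are not part of this hunk.

import SotoDataZone

// Sketch only: identifiers are placeholders and the input initializers are
// assumed from the path parameters above, not taken from this diff.
func describeAsset(_ dataZone: DataZone, domainId: String, assetId: String) async throws {
    let domain = try await dataZone.getDomain(GetDomainInput(identifier: domainId))
    let asset = try await dataZone.getAsset(GetAssetInput(domainIdentifier: domainId, identifier: assetId))
    print(domain, asset)
}

+ /// Gets the details of the specified subscription request.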
+ public func getSubscriptionRequestDetails(_ input: GetSubscriptionRequestDetailsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetSubscriptionRequestDetailsOutput { + return try await self.client.execute(operation: "GetSubscriptionRequestDetails", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the subscription target in Amazon DataZone. + public func getSubscriptionTarget(_ input: GetSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetSubscriptionTargetOutput { + return try await self.client.execute(operation: "GetSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a user profile in Amazon DataZone. + public func getUserProfile(_ input: GetUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetUserProfileOutput { + return try await self.client.execute(operation: "GetUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists the revisions for the asset. + public func listAssetRevisions(_ input: ListAssetRevisionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListAssetRevisionsOutput { + return try await self.client.execute(operation: "ListAssetRevisions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data source run activities. + public func listDataSourceRunActivities(_ input: ListDataSourceRunActivitiesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListDataSourceRunActivitiesOutput { + return try await self.client.execute(operation: "ListDataSourceRunActivities", path: "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}/activities", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data source runs in Amazon DataZone. + public func listDataSourceRuns(_ input: ListDataSourceRunsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListDataSourceRunsOutput { + return try await self.client.execute(operation: "ListDataSourceRuns", path: "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data sources in Amazon DataZone. + public func listDataSources(_ input: ListDataSourcesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListDataSourcesOutput { + return try await self.client.execute(operation: "ListDataSources", path: "/v2/domains/{domainIdentifier}/data-sources", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone domains. + public func listDomains(_ input: ListDomainsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListDomainsOutput { + return try await self.client.execute(operation: "ListDomains", path: "/v2/domains", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists blueprint configurations for a Amazon DataZone environment. + public func listEnvironmentBlueprintConfigurations(_ input: ListEnvironmentBlueprintConfigurationsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListEnvironmentBlueprintConfigurationsOutput { + return try await self.client.execute(operation: "ListEnvironmentBlueprintConfigurations", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists blueprints in an Amazon DataZone environment. + public func listEnvironmentBlueprints(_ input: ListEnvironmentBlueprintsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListEnvironmentBlueprintsOutput { + return try await self.client.execute(operation: "ListEnvironmentBlueprints", path: "/v2/domains/{domainIdentifier}/environment-blueprints", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone environment profiles. + public func listEnvironmentProfiles(_ input: ListEnvironmentProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListEnvironmentProfilesOutput { + return try await self.client.execute(operation: "ListEnvironmentProfiles", path: "/v2/domains/{domainIdentifier}/environment-profiles", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone environments. + public func listEnvironments(_ input: ListEnvironmentsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListEnvironmentsOutput { + return try await self.client.execute(operation: "ListEnvironments", path: "/v2/domains/{domainIdentifier}/environments", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all Amazon DataZone notifications. + public func listNotifications(_ input: ListNotificationsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListNotificationsOutput { + return try await self.client.execute(operation: "ListNotifications", path: "/v2/domains/{domainIdentifier}/notifications", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all members of the specified project. + public func listProjectMemberships(_ input: ListProjectMembershipsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListProjectMembershipsOutput { + return try await self.client.execute(operation: "ListProjectMemberships", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/memberships", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone projects. + public func listProjects(_ input: ListProjectsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListProjectsOutput { + return try await self.client.execute(operation: "ListProjects", path: "/v2/domains/{domainIdentifier}/projects", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscription grants. + public func listSubscriptionGrants(_ input: ListSubscriptionGrantsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListSubscriptionGrantsOutput { + return try await self.client.execute(operation: "ListSubscriptionGrants", path: "/v2/domains/{domainIdentifier}/subscription-grants", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone subscription requests. + public func listSubscriptionRequests(_ input: ListSubscriptionRequestsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListSubscriptionRequestsOutput { + return try await self.client.execute(operation: "ListSubscriptionRequests", path: "/v2/domains/{domainIdentifier}/subscription-requests", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscription targets in Amazon DataZone. + public func listSubscriptionTargets(_ input: ListSubscriptionTargetsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListSubscriptionTargetsOutput { + return try await self.client.execute(operation: "ListSubscriptionTargets", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscriptions in Amazon DataZone. + public func listSubscriptions(_ input: ListSubscriptionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListSubscriptionsOutput { + return try await self.client.execute(operation: "ListSubscriptions", path: "/v2/domains/{domainIdentifier}/subscriptions", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists tags for the specified resource in Amazon DataZone. + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListTagsForResourceResponse { + return try await self.client.execute(operation: "ListTagsForResource", path: "/tags/{resourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Writes the configuration for the specified environment blueprint in Amazon DataZone. + public func putEnvironmentBlueprintConfiguration(_ input: PutEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> PutEnvironmentBlueprintConfigurationOutput { + return try await self.client.execute(operation: "PutEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Rejects automatically generated business-friendly metadata for your Amazon DataZone assets. + public func rejectPredictions(_ input: RejectPredictionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> RejectPredictionsOutput { + return try await self.client.execute(operation: "RejectPredictions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/reject-predictions", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Rejects the specified subscription request. + public func rejectSubscriptionRequest(_ input: RejectSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> RejectSubscriptionRequestOutput { + return try await self.client.execute(operation: "RejectSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/reject", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Revokes a specified subscription in Amazon DataZone. + public func revokeSubscription(_ input: RevokeSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> RevokeSubscriptionOutput { + return try await self.client.execute(operation: "RevokeSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/revoke", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches for assets in Amazon DataZone. + public func search(_ input: SearchInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SearchOutput { + return try await self.client.execute(operation: "Search", path: "/v2/domains/{domainIdentifier}/search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches group profiles in Amazon DataZone. + public func searchGroupProfiles(_ input: SearchGroupProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SearchGroupProfilesOutput { + return try await self.client.execute(operation: "SearchGroupProfiles", path: "/v2/domains/{domainIdentifier}/search-group-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches listings in Amazon DataZone. + public func searchListings(_ input: SearchListingsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SearchListingsOutput { + return try await self.client.execute(operation: "SearchListings", path: "/v2/domains/{domainIdentifier}/listings/search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches for types in Amazon DataZone. + public func searchTypes(_ input: SearchTypesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SearchTypesOutput { + return try await self.client.execute(operation: "SearchTypes", path: "/v2/domains/{domainIdentifier}/types-search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches user profiles in Amazon DataZone. + public func searchUserProfiles(_ input: SearchUserProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> SearchUserProfilesOutput {
+ return try await self.client.execute(operation: "SearchUserProfiles", path: "/v2/domains/{domainIdentifier}/search-user-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Starts the run of the specified data source in Amazon DataZone.
+ public func startDataSourceRun(_ input: StartDataSourceRunInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartDataSourceRunOutput {
+ return try await self.client.execute(operation: "StartDataSourceRun", path: "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Tags a resource in Amazon DataZone.
+ public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> TagResourceResponse {
+ return try await self.client.execute(operation: "TagResource", path: "/tags/{resourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Untags a resource in Amazon DataZone.
+ public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UntagResourceResponse {
+ return try await self.client.execute(operation: "UntagResource", path: "/tags/{resourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Updates the specified data source in Amazon DataZone.
+ public func updateDataSource(_ input: UpdateDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateDataSourceOutput {
+ return try await self.client.execute(operation: "UpdateDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Updates an Amazon DataZone domain.
+ public func updateDomain(_ input: UpdateDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateDomainOutput {
+ return try await self.client.execute(operation: "UpdateDomain", path: "/v2/domains/{identifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Updates the specified environment in Amazon DataZone.
+ public func updateEnvironment(_ input: UpdateEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateEnvironmentOutput {
+ return try await self.client.execute(operation: "UpdateEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
+ /// Updates the specified environment profile in Amazon DataZone.
+ public func updateEnvironmentProfile(_ input: UpdateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateEnvironmentProfileOutput {
+ return try await self.client.execute(operation: "UpdateEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop)
+ }
+
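The mutating operations are the same pattern over POST/PUT/PATCH routes. As a sketch, tagging a resource and then starting a data source run could look like the following; TagResourceRequest(resourceArn:tags:) and StartDataSourceRunInput(dataSourceIdentifier:domainIdentifier:) are assumed from the route parameters and the usual Soto codegen layout, so check the generated shapes before relying on them.

import SotoDataZone

// Sketch only: member names on the request shapes are assumed from the route
// parameters and the usual AWS tagging convention.
func tagAndRun(_ dataZone: DataZone, resourceArn: String, domainId: String, dataSourceId: String) async throws {
    _ = try await dataZone.tagResource(TagResourceRequest(resourceArn: resourceArn, tags: ["team": "analytics"]))
    let run = try await dataZone.startDataSourceRun(
        StartDataSourceRunInput(dataSourceIdentifier: dataSourceId, domainIdentifier: domainId)
    )
    print(run)
}

+ /// Updates the business glossary in Amazon DataZone.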
+ public func updateGlossary(_ input: UpdateGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateGlossaryOutput { + return try await self.client.execute(operation: "UpdateGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates a business glossary term in Amazon DataZone. + public func updateGlossaryTerm(_ input: UpdateGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateGlossaryTermOutput { + return try await self.client.execute(operation: "UpdateGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified group profile in Amazon DataZone. + public func updateGroupProfile(_ input: UpdateGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateGroupProfileOutput { + return try await self.client.execute(operation: "UpdateGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified project in Amazon DataZone. + public func updateProject(_ input: UpdateProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateProjectOutput { + return try await self.client.execute(operation: "UpdateProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the status of the specified subscription grant status in Amazon DataZone. + public func updateSubscriptionGrantStatus(_ input: UpdateSubscriptionGrantStatusInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateSubscriptionGrantStatusOutput { + return try await self.client.execute(operation: "UpdateSubscriptionGrantStatus", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}/status/{assetIdentifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates a specified subscription request in Amazon DataZone. + public func updateSubscriptionRequest(_ input: UpdateSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateSubscriptionRequestOutput { + return try await self.client.execute(operation: "UpdateSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified subscription target in Amazon DataZone. + public func updateSubscriptionTarget(_ input: UpdateSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> UpdateSubscriptionTargetOutput { + return try await self.client.execute(operation: "UpdateSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified user profile in Amazon DataZone. + public func updateUserProfile(_ input: UpdateUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateUserProfileOutput { + return try await self.client.execute(operation: "UpdateUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension DataZone { + /// Lists data source run activities. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listDataSourceRunActivitiesPaginator( + _ input: ListDataSourceRunActivitiesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataSourceRunActivities, + inputKey: \ListDataSourceRunActivitiesInput.nextToken, + outputKey: \ListDataSourceRunActivitiesOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists data source runs in Amazon DataZone. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listDataSourceRunsPaginator( + _ input: ListDataSourceRunsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataSourceRuns, + inputKey: \ListDataSourceRunsInput.nextToken, + outputKey: \ListDataSourceRunsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists data sources in Amazon DataZone. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listDataSourcesPaginator( + _ input: ListDataSourcesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataSources, + inputKey: \ListDataSourcesInput.nextToken, + outputKey: \ListDataSourcesOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists Amazon DataZone domains. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listDomainsPaginator( + _ input: ListDomainsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDomains, + inputKey: \ListDomainsInput.nextToken, + outputKey: \ListDomainsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists blueprint configurations for a Amazon DataZone environment. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listEnvironmentBlueprintConfigurationsPaginator( + _ input: ListEnvironmentBlueprintConfigurationsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listEnvironmentBlueprintConfigurations, + inputKey: \ListEnvironmentBlueprintConfigurationsInput.nextToken, + outputKey: \ListEnvironmentBlueprintConfigurationsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists blueprints in an Amazon DataZone environment. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listEnvironmentBlueprintsPaginator( + _ input: ListEnvironmentBlueprintsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listEnvironmentBlueprints, + inputKey: \ListEnvironmentBlueprintsInput.nextToken, + outputKey: \ListEnvironmentBlueprintsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists Amazon DataZone environment profiles. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listEnvironmentProfilesPaginator( + _ input: ListEnvironmentProfilesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listEnvironmentProfiles, + inputKey: \ListEnvironmentProfilesInput.nextToken, + outputKey: \ListEnvironmentProfilesOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists Amazon DataZone environments. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listEnvironmentsPaginator( + _ input: ListEnvironmentsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listEnvironments, + inputKey: \ListEnvironmentsInput.nextToken, + outputKey: \ListEnvironmentsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists all Amazon DataZone notifications. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listNotificationsPaginator( + _ input: ListNotificationsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listNotifications, + inputKey: \ListNotificationsInput.nextToken, + outputKey: \ListNotificationsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists all members of the specified project. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listProjectMembershipsPaginator( + _ input: ListProjectMembershipsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listProjectMemberships, + inputKey: \ListProjectMembershipsInput.nextToken, + outputKey: \ListProjectMembershipsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists Amazon DataZone projects. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listProjectsPaginator( + _ input: ListProjectsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listProjects, + inputKey: \ListProjectsInput.nextToken, + outputKey: \ListProjectsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists subscription grants. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listSubscriptionGrantsPaginator( + _ input: ListSubscriptionGrantsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSubscriptionGrants, + inputKey: \ListSubscriptionGrantsInput.nextToken, + outputKey: \ListSubscriptionGrantsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists Amazon DataZone subscription requests. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listSubscriptionRequestsPaginator( + _ input: ListSubscriptionRequestsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSubscriptionRequests, + inputKey: \ListSubscriptionRequestsInput.nextToken, + outputKey: \ListSubscriptionRequestsOutput.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists subscription targets in Amazon DataZone. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listSubscriptionTargetsPaginator( + _ input: ListSubscriptionTargetsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil
+ ) -> AWSClient.PaginatorSequence<ListSubscriptionTargetsInput, ListSubscriptionTargetsOutput> {
+ return .init(
+ input: input,
+ command: self.listSubscriptionTargets,
+ inputKey: \ListSubscriptionTargetsInput.nextToken,
+ outputKey: \ListSubscriptionTargetsOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
+ /// Lists subscriptions in Amazon DataZone.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ /// - eventLoop: EventLoop to run this process on
+ public func listSubscriptionsPaginator(
+ _ input: ListSubscriptionsInput,
+ logger: Logger = AWSClient.loggingDisabled,
+ on eventLoop: EventLoop? = nil
+ ) -> AWSClient.PaginatorSequence<ListSubscriptionsInput, ListSubscriptionsOutput> {
+ return .init(
+ input: input,
+ command: self.listSubscriptions,
+ inputKey: \ListSubscriptionsInput.nextToken,
+ outputKey: \ListSubscriptionsOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
+ /// Searches for assets in Amazon DataZone.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ /// - eventLoop: EventLoop to run this process on
+ public func searchPaginator(
+ _ input: SearchInput,
+ logger: Logger = AWSClient.loggingDisabled,
+ on eventLoop: EventLoop? = nil
+ ) -> AWSClient.PaginatorSequence<SearchInput, SearchOutput> {
+ return .init(
+ input: input,
+ command: self.search,
+ inputKey: \SearchInput.nextToken,
+ outputKey: \SearchOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
+ /// Searches group profiles in Amazon DataZone.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ /// - eventLoop: EventLoop to run this process on
+ public func searchGroupProfilesPaginator(
+ _ input: SearchGroupProfilesInput,
+ logger: Logger = AWSClient.loggingDisabled,
+ on eventLoop: EventLoop? = nil
+ ) -> AWSClient.PaginatorSequence<SearchGroupProfilesInput, SearchGroupProfilesOutput> {
+ return .init(
+ input: input,
+ command: self.searchGroupProfiles,
+ inputKey: \SearchGroupProfilesInput.nextToken,
+ outputKey: \SearchGroupProfilesOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
+ /// Searches listings in Amazon DataZone.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ /// - eventLoop: EventLoop to run this process on
+ public func searchListingsPaginator(
+ _ input: SearchListingsInput,
+ logger: Logger = AWSClient.loggingDisabled,
+ on eventLoop: EventLoop? = nil
+ ) -> AWSClient.PaginatorSequence<SearchListingsInput, SearchListingsOutput> {
+ return .init(
+ input: input,
+ command: self.searchListings,
+ inputKey: \SearchListingsInput.nextToken,
+ outputKey: \SearchListingsOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
+ /// Searches for types in Amazon DataZone.
+ /// Return PaginatorSequence for operation.
+ ///
+ /// - Parameters:
+ /// - input: Input for request
+ /// - logger: Logger used for logging
+ /// - eventLoop: EventLoop to run this process on
+ public func searchTypesPaginator(
+ _ input: SearchTypesInput,
+ logger: Logger = AWSClient.loggingDisabled,
+ on eventLoop: EventLoop? = nil
+ ) -> AWSClient.PaginatorSequence<SearchTypesInput, SearchTypesOutput> {
+ return .init(
+ input: input,
+ command: self.searchTypes,
+ inputKey: \SearchTypesInput.nextToken,
+ outputKey: \SearchTypesOutput.nextToken,
+ logger: logger,
+ on: eventLoop
+ )
+ }
+
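Each paginator above wraps its list or search operation in an AWSClient.PaginatorSequence, which soto-core exposes as an AsyncSequence, so pages can be consumed with for try await. A sketch, assuming ListDomainsInput() can be built with its all-optional default initializer:

import SotoDataZone

// Sketch only: walks every page returned by ListDomains.
func printAllDomainPages(_ dataZone: DataZone) async throws {
    for try await page in dataZone.listDomainsPaginator(ListDomainsInput()) {
        print(page) // each element is one ListDomainsOutput page
    }
}

+ /// Searches user profiles in Amazon DataZone.
+ /// Return PaginatorSequence for operation.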
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func searchUserProfilesPaginator( + _ input: SearchUserProfilesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchUserProfiles, + inputKey: \SearchUserProfilesInput.nextToken, + outputKey: \SearchUserProfilesOutput.nextToken, + logger: logger, + on: eventLoop + ) + } +} diff --git a/Sources/Soto/Services/DataZone/DataZone_api.swift b/Sources/Soto/Services/DataZone/DataZone_api.swift new file mode 100644 index 0000000000..a4f755a61d --- /dev/null +++ b/Sources/Soto/Services/DataZone/DataZone_api.swift @@ -0,0 +1,1979 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS DataZone service. +/// +/// Amazon DataZone is a data management service that enables you to catalog, discover, govern, share, and analyze your data. With Amazon DataZone, you can share and access your data across accounts and supported regions. Amazon DataZone simplifies your experience across Amazon Web Services services, including, but not limited to, Amazon Redshift, Amazon Athena, Amazon Web Services Glue, and Amazon Web Services Lake Formation. +public struct DataZone: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the DataZone client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - timeout: Timeout value for HTTP requests + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? 
partition, + service: "datazone", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + serviceEndpoints: [ + "af-south-1": "datazone.af-south-1.api.aws", + "ap-east-1": "datazone.ap-east-1.api.aws", + "ap-northeast-1": "datazone.ap-northeast-1.api.aws", + "ap-northeast-2": "datazone.ap-northeast-2.api.aws", + "ap-northeast-3": "datazone.ap-northeast-3.api.aws", + "ap-south-1": "datazone.ap-south-1.api.aws", + "ap-south-2": "datazone.ap-south-2.api.aws", + "ap-southeast-1": "datazone.ap-southeast-1.api.aws", + "ap-southeast-2": "datazone.ap-southeast-2.api.aws", + "ap-southeast-3": "datazone.ap-southeast-3.api.aws", + "ap-southeast-4": "datazone.ap-southeast-4.api.aws", + "ca-central-1": "datazone.ca-central-1.api.aws", + "cn-north-1": "datazone.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "datazone.eu-central-1.api.aws", + "eu-central-2": "datazone.eu-central-2.api.aws", + "eu-north-1": "datazone.eu-north-1.api.aws", + "eu-south-1": "datazone.eu-south-1.api.aws", + "eu-south-2": "datazone.eu-south-2.api.aws", + "eu-west-1": "datazone.eu-west-1.api.aws", + "eu-west-2": "datazone.eu-west-2.api.aws", + "eu-west-3": "datazone.eu-west-3.api.aws", + "il-central-1": "datazone.il-central-1.api.aws", + "me-central-1": "datazone.me-central-1.api.aws", + "me-south-1": "datazone.me-south-1.api.aws", + "sa-east-1": "datazone.sa-east-1.api.aws", + "us-east-1": "datazone.us-east-1.api.aws", + "us-east-2": "datazone.us-east-2.api.aws", + "us-gov-east-1": "datazone.us-gov-east-1.api.aws", + "us-gov-west-1": "datazone.us-gov-west-1.api.aws", + "us-west-1": "datazone.us-west-1.api.aws", + "us-west-2": "datazone.us-west-2.api.aws" + ], + variantEndpoints: [ + [.fips]: .init(endpoints: [ + "af-south-1": "datazone-fips.af-south-1.api.aws", + "ap-east-1": "datazone-fips.ap-east-1.api.aws", + "ap-northeast-1": "datazone-fips.ap-northeast-1.api.aws", + "ap-northeast-2": "datazone-fips.ap-northeast-2.api.aws", + "ap-northeast-3": "datazone-fips.ap-northeast-3.api.aws", + "ap-south-1": "datazone-fips.ap-south-1.api.aws", + "ap-south-2": "datazone-fips.ap-south-2.api.aws", + "ap-southeast-1": "datazone-fips.ap-southeast-1.api.aws", + "ap-southeast-2": "datazone-fips.ap-southeast-2.api.aws", + "ap-southeast-3": "datazone-fips.ap-southeast-3.api.aws", + "ap-southeast-4": "datazone-fips.ap-southeast-4.api.aws", + "ca-central-1": "datazone-fips.ca-central-1.amazonaws.com", + "cn-north-1": "datazone-fips.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "datazone-fips.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "datazone-fips.eu-central-1.api.aws", + "eu-central-2": "datazone-fips.eu-central-2.api.aws", + "eu-north-1": "datazone-fips.eu-north-1.api.aws", + "eu-south-1": "datazone-fips.eu-south-1.api.aws", + "eu-south-2": "datazone-fips.eu-south-2.api.aws", + "eu-west-1": "datazone-fips.eu-west-1.api.aws", + "eu-west-2": "datazone-fips.eu-west-2.api.aws", + "eu-west-3": "datazone-fips.eu-west-3.api.aws", + "il-central-1": "datazone-fips.il-central-1.api.aws", + "me-central-1": "datazone-fips.me-central-1.api.aws", + "me-south-1": "datazone-fips.me-south-1.api.aws", + "sa-east-1": "datazone-fips.sa-east-1.api.aws", + "us-east-1": "datazone-fips.us-east-1.amazonaws.com", + "us-east-2": "datazone-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "datazone-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "datazone-fips.us-gov-west-1.api.aws", + "us-west-1": 
"datazone-fips.us-west-1.api.aws", + "us-west-2": "datazone-fips.us-west-2.amazonaws.com" + ]) + ], + errorType: DataZoneErrorType.self, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + // MARK: API Calls + + /// Accepts automatically generated business-friendly metadata for your Amazon DataZone assets. + public func acceptPredictions(_ input: AcceptPredictionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "AcceptPredictions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/accept-predictions", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Accepts a subscription request to a specific asset. + public func acceptSubscriptionRequest(_ input: AcceptSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "AcceptSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/accept", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Cancels the subscription to the specified asset. + public func cancelSubscription(_ input: CancelSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CancelSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/cancel", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an asset in Amazon DataZone catalog. + public func createAsset(_ input: CreateAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateAsset", path: "/v2/domains/{domainIdentifier}/assets", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a revision of the asset. + public func createAssetRevision(_ input: CreateAssetRevisionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateAssetRevision", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a custom asset type. + public func createAssetType(_ input: CreateAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateAssetType", path: "/v2/domains/{domainIdentifier}/asset-types", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone data source. + public func createDataSource(_ input: CreateDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateDataSource", path: "/v2/domains/{domainIdentifier}/data-sources", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone domain. + public func createDomain(_ input: CreateDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateDomain", path: "/v2/domains", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Create an Amazon DataZone environment. + public func createEnvironment(_ input: CreateEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateEnvironment", path: "/v2/domains/{domainIdentifier}/environments", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone environment profile. + public func createEnvironmentProfile(_ input: CreateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a metadata form type. + public func createFormType(_ input: CreateFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateFormType", path: "/v2/domains/{domainIdentifier}/form-types", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone business glossary. + public func createGlossary(_ input: CreateGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateGlossary", path: "/v2/domains/{domainIdentifier}/glossaries", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a business glossary term. + public func createGlossaryTerm(_ input: CreateGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a group profile in Amazon DataZone. + public func createGroupProfile(_ input: CreateGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func createListingChangeSet(_ input: CreateListingChangeSetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateListingChangeSet", path: "/v2/domains/{domainIdentifier}/listings/change-set", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates an Amazon DataZone project. + public func createProject(_ input: CreateProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateProject", path: "/v2/domains/{domainIdentifier}/projects", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a project membership in Amazon DataZone. + public func createProjectMembership(_ input: CreateProjectMembershipInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateProjectMembership", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription grant in Amazon DataZone. + public func createSubscriptionGrant(_ input: CreateSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription request in Amazon DataZone. + public func createSubscriptionRequest(_ input: CreateSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a subscription target in Amazon DataZone. + public func createSubscriptionTarget(_ input: CreateSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a user profile in Amazon DataZone. + public func createUserProfile(_ input: CreateUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an asset in Amazon DataZone. + public func deleteAsset(_ input: DeleteAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteAsset", path: "/v2/domains/{domainIdentifier}/assets/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an asset type in Amazon DataZone. + public func deleteAssetType(_ input: DeleteAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteAssetType", path: "/v2/domains/{domainIdentifier}/asset-types/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a data source in Amazon DataZone.
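// Usage sketch for the client defined above: construct an AWSClient, scope the DataZone
// service to a region, and issue the delete call declared below synchronously. The
// `DeleteDataSourceInput` initializer is assumed to mirror the {domainIdentifier} and
// {identifier} path parameters; all identifier values are placeholders.
import SotoDataZone

let awsClient = AWSClient(credentialProvider: .default, httpClientProvider: .createNew)
defer { try? awsClient.syncShutdown() }

// `.with(timeout:)` comes from AWSService and returns a copy of the service with a patched config.
let dataZone = DataZone(client: awsClient, region: .useast1).with(timeout: .seconds(30))

do {
    _ = try dataZone.deleteDataSource(
        .init(domainIdentifier: "dzd_exampledomain", identifier: "example-data-source-id")
    ).wait()
} catch {
    print("DeleteDataSource failed:", error)
}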
+ public func deleteDataSource(_ input: DeleteDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon DataZone domain. + public func deleteDomain(_ input: DeleteDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteDomain", path: "/v2/domains/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an environment in Amazon DataZone. + @discardableResult public func deleteEnvironment(_ input: DeleteEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes the blueprint configuration in Amazon DataZone. + public func deleteEnvironmentBlueprintConfiguration(_ input: DeleteEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an environment profile in Amazon DataZone. + @discardableResult public func deleteEnvironmentProfile(_ input: DeleteEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a metadata form type in Amazon DataZone. + public func deleteFormType(_ input: DeleteFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteFormType", path: "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a business glossary in Amazon DataZone. + public func deleteGlossary(_ input: DeleteGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a business glossary term in Amazon DataZone. + public func deleteGlossaryTerm(_ input: DeleteGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func deleteListing(_ input: DeleteListingInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteListing", path: "/v2/domains/{domainIdentifier}/listings/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a project in Amazon DataZone. + public func deleteProject(_ input: DeleteProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a project membership in Amazon DataZone. + public func deleteProjectMembership(_ input: DeleteProjectMembershipInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteProjectMembership", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/deleteMembership", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a subscription grant in Amazon DataZone. + public func deleteSubscriptionGrant(_ input: DeleteSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a subscription request in Amazon DataZone. + @discardableResult public func deleteSubscriptionRequest(_ input: DeleteSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes a subscription target in Amazon DataZone. + @discardableResult public func deleteSubscriptionTarget(_ input: DeleteSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone asset. + public func getAsset(_ input: GetAssetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetAsset", path: "/v2/domains/{domainIdentifier}/assets/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone asset type.
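// The returned EventLoopFutures can also be consumed without blocking. A sketch using the
// asset type getter declared below with `whenComplete`, reusing the configured `dataZone`
// service from the earlier sketch; the input fields are again assumed to mirror the path
// parameters, and the identifiers are placeholders.
dataZone.getAssetType(.init(domainIdentifier: "dzd_exampledomain", identifier: "ExampleAssetType"))
    .whenComplete { result in
        switch result {
        case .success(let output):
            print("fetched asset type:", output)
        case .failure(let error):
            print("GetAssetType failed:", error)
        }
    }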
+ public func getAssetType(_ input: GetAssetTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetAssetType", path: "/v2/domains/{domainIdentifier}/asset-types/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone data source. + public func getDataSource(_ input: GetDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone data source run. + public func getDataSourceRun(_ input: GetDataSourceRunInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetDataSourceRun", path: "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone domain. + public func getDomain(_ input: GetDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetDomain", path: "/v2/domains/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone environment. + public func getEnvironment(_ input: GetEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an Amazon DataZone blueprint. + public func getEnvironmentBlueprint(_ input: GetEnvironmentBlueprintInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetEnvironmentBlueprint", path: "/v2/domains/{domainIdentifier}/environment-blueprints/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the blueprint configuration in Amazon DataZone. + public func getEnvironmentBlueprintConfiguration(_ input: GetEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets an environment profile in Amazon DataZone. + public func getEnvironmentProfile(_ input: GetEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a metadata form type in Amazon DataZone.
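// The GET operations compose with the usual SwiftNIO combinators. A sketch that checks the
// domain exists and then fetches the metadata form type declared below via `flatMap`,
// reusing the `dataZone` service from the earlier sketch; the initializer parameters are
// assumptions based on the path parameters, and the identifiers are placeholders.
let formTypeFuture = dataZone
    .getDomain(.init(identifier: "dzd_exampledomain"))
    .flatMap { _ in
        dataZone.getFormType(.init(
            domainIdentifier: "dzd_exampledomain",
            formTypeIdentifier: "ExampleFormType"
        ))
    }
formTypeFuture.whenSuccess { print("form type:", $0) }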
+ public func getFormType(_ input: GetFormTypeInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetFormType", path: "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a business glossary in Amazon DataZone. + public func getGlossary(_ input: GetGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a business glossary term in Amazon DataZone. + public func getGlossaryTerm(_ input: GetGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a group profile in Amazon DataZone. + public func getGroupProfile(_ input: GetGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the data portal URL for the specified Amazon DataZone domain. + public func getIamPortalLoginUrl(_ input: GetIamPortalLoginUrlInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetIamPortalLoginUrl", path: "/v2/domains/{domainIdentifier}/get-portal-login-url", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + public func getListing(_ input: GetListingInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetListing", path: "/v2/domains/{domainIdentifier}/listings/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a project in Amazon DataZone. + public func getProject(_ input: GetProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a subscription in Amazon DataZone. + public func getSubscription(_ input: GetSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the subscription grant in Amazon DataZone. + public func getSubscriptionGrant(_ input: GetSubscriptionGrantInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "GetSubscriptionGrant", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the details of the specified subscription request. + public func getSubscriptionRequestDetails(_ input: GetSubscriptionRequestDetailsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetSubscriptionRequestDetails", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets the subscription target in Amazon DataZone. + public func getSubscriptionTarget(_ input: GetSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets a user profile in Amazon DataZone. + public func getUserProfile(_ input: GetUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists the revisions for the asset. + public func listAssetRevisions(_ input: ListAssetRevisionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListAssetRevisions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data source run activities. + public func listDataSourceRunActivities(_ input: ListDataSourceRunActivitiesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListDataSourceRunActivities", path: "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}/activities", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data source runs in Amazon DataZone. + public func listDataSourceRuns(_ input: ListDataSourceRunsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListDataSourceRuns", path: "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists data sources in Amazon DataZone. + public func listDataSources(_ input: ListDataSourcesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListDataSources", path: "/v2/domains/{domainIdentifier}/data-sources", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone domains. 
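// ListDomains is paginated. Until the dedicated paginators defined later in this file are
// used, pages can be walked manually via `nextToken` (the `nextToken` properties are
// confirmed by the paginator key paths below; the all-optional `ListDomainsInput`
// initializer is an assumption). `dataZone` is the service object from the earlier sketch.
do {
    var nextToken: String?
    repeat {
        let page = try dataZone.listDomains(.init(nextToken: nextToken)).wait()
        print(page)                // one page of domain summaries
        nextToken = page.nextToken
    } while nextToken != nil
} catch {
    print("ListDomains failed:", error)
}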
+ public func listDomains(_ input: ListDomainsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListDomains", path: "/v2/domains", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists blueprint configurations for a Amazon DataZone environment. + public func listEnvironmentBlueprintConfigurations(_ input: ListEnvironmentBlueprintConfigurationsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListEnvironmentBlueprintConfigurations", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists blueprints in an Amazon DataZone environment. + public func listEnvironmentBlueprints(_ input: ListEnvironmentBlueprintsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListEnvironmentBlueprints", path: "/v2/domains/{domainIdentifier}/environment-blueprints", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone environment profiles. + public func listEnvironmentProfiles(_ input: ListEnvironmentProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListEnvironmentProfiles", path: "/v2/domains/{domainIdentifier}/environment-profiles", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone environments. + public func listEnvironments(_ input: ListEnvironmentsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListEnvironments", path: "/v2/domains/{domainIdentifier}/environments", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all Amazon DataZone notifications. + public func listNotifications(_ input: ListNotificationsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListNotifications", path: "/v2/domains/{domainIdentifier}/notifications", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all members of the specified project. + public func listProjectMemberships(_ input: ListProjectMembershipsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListProjectMemberships", path: "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/memberships", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone projects. + public func listProjects(_ input: ListProjectsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListProjects", path: "/v2/domains/{domainIdentifier}/projects", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscription grants. 
+ public func listSubscriptionGrants(_ input: ListSubscriptionGrantsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListSubscriptionGrants", path: "/v2/domains/{domainIdentifier}/subscription-grants", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists Amazon DataZone subscription requests. + public func listSubscriptionRequests(_ input: ListSubscriptionRequestsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListSubscriptionRequests", path: "/v2/domains/{domainIdentifier}/subscription-requests", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscription targets in Amazon DataZone. + public func listSubscriptionTargets(_ input: ListSubscriptionTargetsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListSubscriptionTargets", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists subscriptions in Amazon DataZone. + public func listSubscriptions(_ input: ListSubscriptionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListSubscriptions", path: "/v2/domains/{domainIdentifier}/subscriptions", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists tags for the specified resource in Amazon DataZone. + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListTagsForResource", path: "/tags/{resourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Writes the configuration for the specified environment blueprint in Amazon DataZone. + public func putEnvironmentBlueprintConfiguration(_ input: PutEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "PutEnvironmentBlueprintConfiguration", path: "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Rejects automatically generated business-friendly metadata for your Amazon DataZone assets. + public func rejectPredictions(_ input: RejectPredictionsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "RejectPredictions", path: "/v2/domains/{domainIdentifier}/assets/{identifier}/reject-predictions", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Rejects the specified subscription request. + public func rejectSubscriptionRequest(_ input: RejectSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "RejectSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/reject", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Revokes a specified subscription in Amazon DataZone. + public func revokeSubscription(_ input: RevokeSubscriptionInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "RevokeSubscription", path: "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/revoke", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches for assets in Amazon DataZone. + public func search(_ input: SearchInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "Search", path: "/v2/domains/{domainIdentifier}/search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches group profiles in Amazon DataZone. + public func searchGroupProfiles(_ input: SearchGroupProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "SearchGroupProfiles", path: "/v2/domains/{domainIdentifier}/search-group-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches listings in Amazon DataZone. + public func searchListings(_ input: SearchListingsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "SearchListings", path: "/v2/domains/{domainIdentifier}/listings/search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches for types in Amazon DataZone. + public func searchTypes(_ input: SearchTypesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "SearchTypes", path: "/v2/domains/{domainIdentifier}/types-search", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Searches user profiles in Amazon DataZone. + public func searchUserProfiles(_ input: SearchUserProfilesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "SearchUserProfiles", path: "/v2/domains/{domainIdentifier}/search-user-profiles", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Start the run of the specified data source in Amazon DataZone. + public func startDataSourceRun(_ input: StartDataSourceRunInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "StartDataSourceRun", path: "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Tags a resource in Amazon DataZone. + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "TagResource", path: "/tags/{resourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Untags a resource in Amazon DataZone. + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UntagResource", path: "/tags/{resourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified data source in Amazon DataZone. + public func updateDataSource(_ input: UpdateDataSourceInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateDataSource", path: "/v2/domains/{domainIdentifier}/data-sources/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates a Amazon DataZone domain. + public func updateDomain(_ input: UpdateDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateDomain", path: "/v2/domains/{identifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified environment in Amazon DataZone. + public func updateEnvironment(_ input: UpdateEnvironmentInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateEnvironment", path: "/v2/domains/{domainIdentifier}/environments/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified environment profile in Amazon DataZone. + public func updateEnvironmentProfile(_ input: UpdateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateEnvironmentProfile", path: "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the business glossary in Amazon DataZone. + public func updateGlossary(_ input: UpdateGlossaryInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateGlossary", path: "/v2/domains/{domainIdentifier}/glossaries/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates a business glossary term in Amazon DataZone. + public func updateGlossaryTerm(_ input: UpdateGlossaryTermInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateGlossaryTerm", path: "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified group profile in Amazon DataZone. + public func updateGroupProfile(_ input: UpdateGroupProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateGroupProfile", path: "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified project in Amazon DataZone. + public func updateProject(_ input: UpdateProjectInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateProject", path: "/v2/domains/{domainIdentifier}/projects/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the status of the specified subscription grant in Amazon DataZone. + public func updateSubscriptionGrantStatus(_ input: UpdateSubscriptionGrantStatusInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateSubscriptionGrantStatus", path: "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}/status/{assetIdentifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates a specified subscription request in Amazon DataZone. + public func updateSubscriptionRequest(_ input: UpdateSubscriptionRequestInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateSubscriptionRequest", path: "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified subscription target in Amazon DataZone. + public func updateSubscriptionTarget(_ input: UpdateSubscriptionTargetInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateSubscriptionTarget", path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the specified user profile in Amazon DataZone. + public func updateUserProfile(_ input: UpdateUserProfileInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateUserProfile", path: "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } +} + +extension DataZone { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are no public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: DataZone, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +extension DataZone { + /// Lists data source run activities. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`.
+ /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listDataSourceRunActivitiesPaginator( + _ input: ListDataSourceRunActivitiesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListDataSourceRunActivitiesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listDataSourceRunActivities, + inputKey: \ListDataSourceRunActivitiesInput.nextToken, + outputKey: \ListDataSourceRunActivitiesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listDataSourceRunActivitiesPaginator( + _ input: ListDataSourceRunActivitiesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListDataSourceRunActivitiesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listDataSourceRunActivities, + inputKey: \ListDataSourceRunActivitiesInput.nextToken, + outputKey: \ListDataSourceRunActivitiesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists data source runs in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listDataSourceRunsPaginator( + _ input: ListDataSourceRunsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListDataSourceRunsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listDataSourceRuns, + inputKey: \ListDataSourceRunsInput.nextToken, + outputKey: \ListDataSourceRunsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listDataSourceRunsPaginator( + _ input: ListDataSourceRunsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListDataSourceRunsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listDataSourceRuns, + inputKey: \ListDataSourceRunsInput.nextToken, + outputKey: \ListDataSourceRunsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists data sources in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listDataSourcesPaginator( + _ input: ListDataSourcesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListDataSourcesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listDataSources, + inputKey: \ListDataSourcesInput.nextToken, + outputKey: \ListDataSourcesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listDataSourcesPaginator( + _ input: ListDataSourcesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListDataSourcesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listDataSources, + inputKey: \ListDataSourcesInput.nextToken, + outputKey: \ListDataSourcesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists Amazon DataZone domains. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. 
This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listDomainsPaginator( + _ input: ListDomainsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListDomainsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listDomains, + inputKey: \ListDomainsInput.nextToken, + outputKey: \ListDomainsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listDomainsPaginator( + _ input: ListDomainsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListDomainsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listDomains, + inputKey: \ListDomainsInput.nextToken, + outputKey: \ListDomainsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists blueprint configurations for a Amazon DataZone environment. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listEnvironmentBlueprintConfigurationsPaginator( + _ input: ListEnvironmentBlueprintConfigurationsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListEnvironmentBlueprintConfigurationsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listEnvironmentBlueprintConfigurations, + inputKey: \ListEnvironmentBlueprintConfigurationsInput.nextToken, + outputKey: \ListEnvironmentBlueprintConfigurationsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listEnvironmentBlueprintConfigurationsPaginator( + _ input: ListEnvironmentBlueprintConfigurationsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (ListEnvironmentBlueprintConfigurationsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listEnvironmentBlueprintConfigurations, + inputKey: \ListEnvironmentBlueprintConfigurationsInput.nextToken, + outputKey: \ListEnvironmentBlueprintConfigurationsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists blueprints in an Amazon DataZone environment. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listEnvironmentBlueprintsPaginator( + _ input: ListEnvironmentBlueprintsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListEnvironmentBlueprintsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listEnvironmentBlueprints, + inputKey: \ListEnvironmentBlueprintsInput.nextToken, + outputKey: \ListEnvironmentBlueprintsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listEnvironmentBlueprintsPaginator( + _ input: ListEnvironmentBlueprintsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListEnvironmentBlueprintsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listEnvironmentBlueprints, + inputKey: \ListEnvironmentBlueprintsInput.nextToken, + outputKey: \ListEnvironmentBlueprintsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists Amazon DataZone environment profiles. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. 
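// A sketch of the reduce-style overload declared below: it threads an Int accumulator
// through `onPage` to count result pages. `ListEnvironmentProfilesInput` is assumed to be
// constructible from just the domain identifier (a placeholder here); `dataZone` is the
// service object from the earlier sketches.
do {
    let pageCount = try dataZone.listEnvironmentProfilesPaginator(
        .init(domainIdentifier: "dzd_exampledomain"),
        0,
        onPage: { count, _, eventLoop in
            // Return (shouldContinue, updated accumulator).
            return eventLoop.makeSucceededFuture((true, count + 1))
        }
    ).wait()
    print("fetched \(pageCount) pages of environment profiles")
} catch {
    print("pagination failed:", error)
}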
+ public func listEnvironmentProfilesPaginator( + _ input: ListEnvironmentProfilesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListEnvironmentProfilesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listEnvironmentProfiles, + inputKey: \ListEnvironmentProfilesInput.nextToken, + outputKey: \ListEnvironmentProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listEnvironmentProfilesPaginator( + _ input: ListEnvironmentProfilesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListEnvironmentProfilesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listEnvironmentProfiles, + inputKey: \ListEnvironmentProfilesInput.nextToken, + outputKey: \ListEnvironmentProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists Amazon DataZone environments. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listEnvironmentsPaginator( + _ input: ListEnvironmentsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListEnvironmentsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listEnvironments, + inputKey: \ListEnvironmentsInput.nextToken, + outputKey: \ListEnvironmentsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listEnvironmentsPaginator( + _ input: ListEnvironmentsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (ListEnvironmentsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listEnvironments, + inputKey: \ListEnvironmentsInput.nextToken, + outputKey: \ListEnvironmentsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists all Amazon DataZone notifications. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listNotificationsPaginator( + _ input: ListNotificationsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListNotificationsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listNotifications, + inputKey: \ListNotificationsInput.nextToken, + outputKey: \ListNotificationsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listNotificationsPaginator( + _ input: ListNotificationsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListNotificationsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listNotifications, + inputKey: \ListNotificationsInput.nextToken, + outputKey: \ListNotificationsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists all members of the specified project. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listProjectMembershipsPaginator( + _ input: ListProjectMembershipsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (Result, ListProjectMembershipsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listProjectMemberships, + inputKey: \ListProjectMembershipsInput.nextToken, + outputKey: \ListProjectMembershipsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listProjectMembershipsPaginator( + _ input: ListProjectMembershipsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListProjectMembershipsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listProjectMemberships, + inputKey: \ListProjectMembershipsInput.nextToken, + outputKey: \ListProjectMembershipsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists Amazon DataZone projects. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listProjectsPaginator( + _ input: ListProjectsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListProjectsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listProjects, + inputKey: \ListProjectsInput.nextToken, + outputKey: \ListProjectsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listProjectsPaginator( + _ input: ListProjectsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListProjectsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listProjects, + inputKey: \ListProjectsInput.nextToken, + outputKey: \ListProjectsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists subscription grants. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. 
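// Illustrative usage sketch (editorial note, not part of the generated patch).
// The accumulating paginator overloads above behave like `Array.reduce(_:_:)`: the
// closure receives the running value plus one page of results and returns a future of
// (continue?, updated value). The snippet below collects every project in a domain.
// The `items` property on ListProjectsOutput, the ProjectSummary element type, and the
// standard Soto client/service setup are assumptions about code declared elsewhere in
// this patch, not guaranteed by the lines shown here.
import SotoCore
import SotoDataZone

let awsClient = AWSClient(httpClientProvider: .createNew)
let dataZone = DataZone(client: awsClient, region: .useast1)

let allProjects = try dataZone.listProjectsPaginator(
    DataZone.ListProjectsInput(domainIdentifier: "dzd_example123", maxResults: 50),
    [DataZone.ProjectSummary]()
) { projects, page, eventLoop in
    // Append this page's results and keep paginating.
    return eventLoop.makeSucceededFuture((true, projects + (page.items ?? [])))
}.wait()
print("fetched \(allProjects.count) projects")
try awsClient.syncShutdown()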
+ /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listSubscriptionGrantsPaginator( + _ input: ListSubscriptionGrantsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListSubscriptionGrantsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listSubscriptionGrants, + inputKey: \ListSubscriptionGrantsInput.nextToken, + outputKey: \ListSubscriptionGrantsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listSubscriptionGrantsPaginator( + _ input: ListSubscriptionGrantsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListSubscriptionGrantsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listSubscriptionGrants, + inputKey: \ListSubscriptionGrantsInput.nextToken, + outputKey: \ListSubscriptionGrantsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists Amazon DataZone subscription requests. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listSubscriptionRequestsPaginator( + _ input: ListSubscriptionRequestsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListSubscriptionRequestsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listSubscriptionRequests, + inputKey: \ListSubscriptionRequestsInput.nextToken, + outputKey: \ListSubscriptionRequestsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. 
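// Illustrative usage sketch (editorial note, not part of the generated patch).
// The simpler overloads above hand each page to `onPage` and treat the returned Bool
// future as "fetch another page?". Every follow-up request is built through
// `usingPaginationToken(_:)` from the AWSPaginateToken extensions further down, so
// callers never manage `nextToken` themselves. `dataZone` is the client from the
// previous sketch; the `items` array, the summary's `id` field, and the `.pending`
// status filter are assumptions about shapes declared elsewhere in this patch.
try dataZone.listSubscriptionRequestsPaginator(
    .init(domainIdentifier: "dzd_example123", status: .pending)
) { page, eventLoop in
    for request in page.items ?? [] {
        print("pending subscription request:", request.id)
    }
    return eventLoop.makeSucceededFuture(true) // continue until nextToken is exhausted
}.wait()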
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listSubscriptionRequestsPaginator( + _ input: ListSubscriptionRequestsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListSubscriptionRequestsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listSubscriptionRequests, + inputKey: \ListSubscriptionRequestsInput.nextToken, + outputKey: \ListSubscriptionRequestsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists subscription targets in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listSubscriptionTargetsPaginator( + _ input: ListSubscriptionTargetsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListSubscriptionTargetsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listSubscriptionTargets, + inputKey: \ListSubscriptionTargetsInput.nextToken, + outputKey: \ListSubscriptionTargetsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listSubscriptionTargetsPaginator( + _ input: ListSubscriptionTargetsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListSubscriptionTargetsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listSubscriptionTargets, + inputKey: \ListSubscriptionTargetsInput.nextToken, + outputKey: \ListSubscriptionTargetsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists subscriptions in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. 
It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listSubscriptionsPaginator( + _ input: ListSubscriptionsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListSubscriptionsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listSubscriptions, + inputKey: \ListSubscriptionsInput.nextToken, + outputKey: \ListSubscriptionsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listSubscriptionsPaginator( + _ input: ListSubscriptionsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListSubscriptionsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listSubscriptions, + inputKey: \ListSubscriptionsInput.nextToken, + outputKey: \ListSubscriptionsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Searches for assets in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func searchPaginator( + _ input: SearchInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, SearchOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.search, + inputKey: \SearchInput.nextToken, + outputKey: \SearchOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func searchPaginator( + _ input: SearchInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (SearchOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.search, + inputKey: \SearchInput.nextToken, + outputKey: \SearchOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Searches group profiles in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func searchGroupProfilesPaginator( + _ input: SearchGroupProfilesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, SearchGroupProfilesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.searchGroupProfiles, + inputKey: \SearchGroupProfilesInput.nextToken, + outputKey: \SearchGroupProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func searchGroupProfilesPaginator( + _ input: SearchGroupProfilesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (SearchGroupProfilesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.searchGroupProfiles, + inputKey: \SearchGroupProfilesInput.nextToken, + outputKey: \SearchGroupProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Searches listings in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func searchListingsPaginator( + _ input: SearchListingsInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? 
= nil, + onPage: @escaping (Result, SearchListingsOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.searchListings, + inputKey: \SearchListingsInput.nextToken, + outputKey: \SearchListingsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func searchListingsPaginator( + _ input: SearchListingsInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (SearchListingsOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.searchListings, + inputKey: \SearchListingsInput.nextToken, + outputKey: \SearchListingsOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Searches for types in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func searchTypesPaginator( + _ input: SearchTypesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, SearchTypesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.searchTypes, + inputKey: \SearchTypesInput.nextToken, + outputKey: \SearchTypesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func searchTypesPaginator( + _ input: SearchTypesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (SearchTypesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.searchTypes, + inputKey: \SearchTypesInput.nextToken, + outputKey: \SearchTypesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Searches user profiles in Amazon DataZone. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. 
`initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func searchUserProfilesPaginator( + _ input: SearchUserProfilesInput, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, SearchUserProfilesOutput, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.searchUserProfiles, + inputKey: \SearchUserProfilesInput.nextToken, + outputKey: \SearchUserProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func searchUserProfilesPaginator( + _ input: SearchUserProfilesInput, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (SearchUserProfilesOutput, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.searchUserProfiles, + inputKey: \SearchUserProfilesInput.nextToken, + outputKey: \SearchUserProfilesOutput.nextToken, + on: eventLoop, + onPage: onPage + ) + } +} + +extension DataZone.ListDataSourceRunActivitiesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListDataSourceRunActivitiesInput { + return .init( + domainIdentifier: self.domainIdentifier, + identifier: self.identifier, + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + +extension DataZone.ListDataSourceRunsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListDataSourceRunsInput { + return .init( + dataSourceIdentifier: self.dataSourceIdentifier, + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + +extension DataZone.ListDataSourcesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListDataSourcesInput { + return .init( + domainIdentifier: self.domainIdentifier, + environmentIdentifier: self.environmentIdentifier, + maxResults: self.maxResults, + name: self.name, + nextToken: token, + projectIdentifier: self.projectIdentifier, + status: self.status, + type: self.type + ) + } +} + +extension DataZone.ListDomainsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListDomainsInput { + return .init( + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + +extension DataZone.ListEnvironmentBlueprintConfigurationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentBlueprintConfigurationsInput { + return .init( + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension DataZone.ListEnvironmentBlueprintsInput: AWSPaginateToken { + public func 
usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentBlueprintsInput { + return .init( + domainIdentifier: self.domainIdentifier, + managed: self.managed, + maxResults: self.maxResults, + name: self.name, + nextToken: token + ) + } +} + +extension DataZone.ListEnvironmentProfilesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentProfilesInput { + return .init( + awsAccountId: self.awsAccountId, + awsAccountRegion: self.awsAccountRegion, + domainIdentifier: self.domainIdentifier, + environmentBlueprintIdentifier: self.environmentBlueprintIdentifier, + maxResults: self.maxResults, + name: self.name, + nextToken: token, + projectIdentifier: self.projectIdentifier + ) + } +} + +extension DataZone.ListEnvironmentsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentsInput { + return .init( + awsAccountId: self.awsAccountId, + awsAccountRegion: self.awsAccountRegion, + domainIdentifier: self.domainIdentifier, + environmentBlueprintIdentifier: self.environmentBlueprintIdentifier, + environmentProfileIdentifier: self.environmentProfileIdentifier, + maxResults: self.maxResults, + name: self.name, + nextToken: token, + projectIdentifier: self.projectIdentifier, + provider: self.provider, + status: self.status + ) + } +} + +extension DataZone.ListNotificationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListNotificationsInput { + return .init( + afterTimestamp: self.afterTimestamp, + beforeTimestamp: self.beforeTimestamp, + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + subjects: self.subjects, + taskStatus: self.taskStatus, + type: self.type + ) + } +} + +extension DataZone.ListProjectMembershipsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListProjectMembershipsInput { + return .init( + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + projectIdentifier: self.projectIdentifier, + sortBy: self.sortBy, + sortOrder: self.sortOrder + ) + } +} + +extension DataZone.ListProjectsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListProjectsInput { + return .init( + domainIdentifier: self.domainIdentifier, + groupIdentifier: self.groupIdentifier, + maxResults: self.maxResults, + name: self.name, + nextToken: token, + userIdentifier: self.userIdentifier + ) + } +} + +extension DataZone.ListSubscriptionGrantsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListSubscriptionGrantsInput { + return .init( + domainIdentifier: self.domainIdentifier, + environmentId: self.environmentId, + maxResults: self.maxResults, + nextToken: token, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + subscribedListingId: self.subscribedListingId, + subscriptionId: self.subscriptionId, + subscriptionTargetId: self.subscriptionTargetId + ) + } +} + +extension DataZone.ListSubscriptionRequestsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListSubscriptionRequestsInput { + return .init( + approverProjectId: self.approverProjectId, + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + owningProjectId: self.owningProjectId, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + status: self.status, + subscribedListingId: self.subscribedListingId + ) + } +} + +extension 
DataZone.ListSubscriptionTargetsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListSubscriptionTargetsInput { + return .init( + domainIdentifier: self.domainIdentifier, + environmentIdentifier: self.environmentIdentifier, + maxResults: self.maxResults, + nextToken: token, + sortBy: self.sortBy, + sortOrder: self.sortOrder + ) + } +} + +extension DataZone.ListSubscriptionsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListSubscriptionsInput { + return .init( + approverProjectId: self.approverProjectId, + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + owningProjectId: self.owningProjectId, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + status: self.status, + subscribedListingId: self.subscribedListingId, + subscriptionRequestIdentifier: self.subscriptionRequestIdentifier + ) + } +} + +extension DataZone.SearchGroupProfilesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.SearchGroupProfilesInput { + return .init( + domainIdentifier: self.domainIdentifier, + groupType: self.groupType, + maxResults: self.maxResults, + nextToken: token, + searchText: self.searchText + ) + } +} + +extension DataZone.SearchInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.SearchInput { + return .init( + additionalAttributes: self.additionalAttributes, + domainIdentifier: self.domainIdentifier, + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + owningProjectIdentifier: self.owningProjectIdentifier, + searchIn: self.searchIn, + searchScope: self.searchScope, + searchText: self.searchText, + sort: self.sort + ) + } +} + +extension DataZone.SearchListingsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.SearchListingsInput { + return .init( + additionalAttributes: self.additionalAttributes, + domainIdentifier: self.domainIdentifier, + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + searchIn: self.searchIn, + searchText: self.searchText, + sort: self.sort + ) + } +} + +extension DataZone.SearchTypesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.SearchTypesInput { + return .init( + domainIdentifier: self.domainIdentifier, + filters: self.filters, + managed: self.managed, + maxResults: self.maxResults, + nextToken: token, + searchIn: self.searchIn, + searchScope: self.searchScope, + searchText: self.searchText, + sort: self.sort + ) + } +} + +extension DataZone.SearchUserProfilesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.SearchUserProfilesInput { + return .init( + domainIdentifier: self.domainIdentifier, + maxResults: self.maxResults, + nextToken: token, + searchText: self.searchText, + userType: self.userType + ) + } +} diff --git a/Sources/Soto/Services/DataZone/DataZone_shapes.swift b/Sources/Soto/Services/DataZone/DataZone_shapes.swift new file mode 100644 index 0000000000..327910176f --- /dev/null +++ b/Sources/Soto/Services/DataZone/DataZone_shapes.swift @@ -0,0 +1,10773 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto 
project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if compiler(>=5.7) && os(Linux) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +import SotoCore + +extension DataZone { + // MARK: Enums + + public enum AcceptRuleBehavior: String, CustomStringConvertible, Codable, Sendable { + case all = "ALL" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum AuthType: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case iamIdc = "IAM_IDC" + public var description: String { return self.rawValue } + } + + public enum ChangeAction: String, CustomStringConvertible, Codable, Sendable { + case publish = "PUBLISH" + case unpublish = "UNPUBLISH" + public var description: String { return self.rawValue } + } + + public enum ConfigurableActionTypeAuthorization: String, CustomStringConvertible, Codable, Sendable { + case https = "HTTPS" + case iam = "IAM" + public var description: String { return self.rawValue } + } + + public enum DataAssetActivityStatus: String, CustomStringConvertible, Codable, Sendable { + case failed = "FAILED" + case publishingFailed = "PUBLISHING_FAILED" + case skippedAlreadyImported = "SKIPPED_ALREADY_IMPORTED" + case skippedArchived = "SKIPPED_ARCHIVED" + case skippedNoAccess = "SKIPPED_NO_ACCESS" + case succeededCreated = "SUCCEEDED_CREATED" + case succeededUpdated = "SUCCEEDED_UPDATED" + case unchanged = "UNCHANGED" + public var description: String { return self.rawValue } + } + + public enum DataSourceErrorType: String, CustomStringConvertible, Codable, Sendable { + case accessDeniedException = "ACCESS_DENIED_EXCEPTION" + case conflictException = "CONFLICT_EXCEPTION" + case internalServerException = "INTERNAL_SERVER_EXCEPTION" + case resourceNotFoundException = "RESOURCE_NOT_FOUND_EXCEPTION" + case serviceQuotaExceededException = "SERVICE_QUOTA_EXCEEDED_EXCEPTION" + case throttlingException = "THROTTLING_EXCEPTION" + case validationException = "VALIDATION_EXCEPTION" + public var description: String { return self.rawValue } + } + + public enum DataSourceRunStatus: String, CustomStringConvertible, Codable, Sendable { + case failed = "FAILED" + case partiallySucceeded = "PARTIALLY_SUCCEEDED" + case requested = "REQUESTED" + case running = "RUNNING" + case success = "SUCCESS" + public var description: String { return self.rawValue } + } + + public enum DataSourceRunType: String, CustomStringConvertible, Codable, Sendable { + case prioritized = "PRIORITIZED" + case scheduled = "SCHEDULED" + public var description: String { return self.rawValue } + } + + public enum DataSourceStatus: String, CustomStringConvertible, Codable, Sendable { + case creating = "CREATING" + case deleting = "DELETING" + case failedCreation = "FAILED_CREATION" + case failedDeletion = "FAILED_DELETION" + case failedUpdate = "FAILED_UPDATE" + case ready = "READY" + case running = "RUNNING" + case updating = "UPDATING" + public var description: String { return self.rawValue } + } + + public enum DeploymentStatus: String, CustomStringConvertible, Codable, Sendable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case pendingDeployment = "PENDING_DEPLOYMENT" + case successful = "SUCCESSFUL" + public var description: 
String { return self.rawValue } + } + + public enum DeploymentType: String, CustomStringConvertible, Codable, Sendable { + case create = "CREATE" + case delete = "DELETE" + case update = "UPDATE" + public var description: String { return self.rawValue } + } + + public enum DomainStatus: String, CustomStringConvertible, Codable, Sendable { + case available = "AVAILABLE" + case creating = "CREATING" + case creationFailed = "CREATION_FAILED" + case deleted = "DELETED" + case deleting = "DELETING" + case deletionFailed = "DELETION_FAILED" + public var description: String { return self.rawValue } + } + + public enum EnableSetting: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum EntityType: String, CustomStringConvertible, Codable, Sendable { + case asset = "ASSET" + public var description: String { return self.rawValue } + } + + public enum EnvironmentStatus: String, CustomStringConvertible, Codable, Sendable { + case active = "ACTIVE" + case createFailed = "CREATE_FAILED" + case creating = "CREATING" + case deleteFailed = "DELETE_FAILED" + case deleted = "DELETED" + case deleting = "DELETING" + case disabled = "DISABLED" + case expired = "EXPIRED" + case inaccessible = "INACCESSIBLE" + case suspended = "SUSPENDED" + case updateFailed = "UPDATE_FAILED" + case updating = "UPDATING" + case validationFailed = "VALIDATION_FAILED" + public var description: String { return self.rawValue } + } + + public enum FilterExpressionType: String, CustomStringConvertible, Codable, Sendable { + case exclude = "EXCLUDE" + case include = "INCLUDE" + public var description: String { return self.rawValue } + } + + public enum FormTypeStatus: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum GlossaryStatus: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum GlossaryTermStatus: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum GroupProfileStatus: String, CustomStringConvertible, Codable, Sendable { + case assigned = "ASSIGNED" + case notAssigned = "NOT_ASSIGNED" + public var description: String { return self.rawValue } + } + + public enum GroupSearchType: String, CustomStringConvertible, Codable, Sendable { + case datazoneSsoGroup = "DATAZONE_SSO_GROUP" + case ssoGroup = "SSO_GROUP" + public var description: String { return self.rawValue } + } + + public enum InventorySearchScope: String, CustomStringConvertible, Codable, Sendable { + case asset = "ASSET" + case glossary = "GLOSSARY" + case glossaryTerm = "GLOSSARY_TERM" + public var description: String { return self.rawValue } + } + + public enum ListingStatus: String, CustomStringConvertible, Codable, Sendable { + case active = "ACTIVE" + case creating = "CREATING" + case inactive = "INACTIVE" + public var description: String { return self.rawValue } + } + + public enum NotificationResourceType: String, CustomStringConvertible, Codable, Sendable { + case project = "PROJECT" + public var description: String { return self.rawValue } + } + + public enum NotificationRole: String, CustomStringConvertible, Codable, Sendable { + case 
domainOwner = "DOMAIN_OWNER" + case projectContributor = "PROJECT_CONTRIBUTOR" + case projectOwner = "PROJECT_OWNER" + case projectSubscriber = "PROJECT_SUBSCRIBER" + case projectViewer = "PROJECT_VIEWER" + public var description: String { return self.rawValue } + } + + public enum NotificationType: String, CustomStringConvertible, Codable, Sendable { + case event = "EVENT" + case task = "TASK" + public var description: String { return self.rawValue } + } + + public enum RejectRuleBehavior: String, CustomStringConvertible, Codable, Sendable { + case all = "ALL" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum SearchOutputAdditionalAttribute: String, CustomStringConvertible, Codable, Sendable { + case forms = "FORMS" + public var description: String { return self.rawValue } + } + + public enum SortFieldProject: String, CustomStringConvertible, Codable, Sendable { + case name = "NAME" + public var description: String { return self.rawValue } + } + + public enum SortKey: String, CustomStringConvertible, Codable, Sendable { + case createdAt = "CREATED_AT" + case updatedAt = "UPDATED_AT" + public var description: String { return self.rawValue } + } + + public enum SortOrder: String, CustomStringConvertible, Codable, Sendable { + case ascending = "ASCENDING" + case descending = "DESCENDING" + public var description: String { return self.rawValue } + } + + public enum SubscriptionGrantOverallStatus: String, CustomStringConvertible, Codable, Sendable { + case completed = "COMPLETED" + case grantAndRevokeFailed = "GRANT_AND_REVOKE_FAILED" + case grantFailed = "GRANT_FAILED" + case inProgress = "IN_PROGRESS" + case inaccessible = "INACCESSIBLE" + case pending = "PENDING" + case revokeFailed = "REVOKE_FAILED" + public var description: String { return self.rawValue } + } + + public enum SubscriptionGrantStatus: String, CustomStringConvertible, Codable, Sendable { + case grantFailed = "GRANT_FAILED" + case grantInProgress = "GRANT_IN_PROGRESS" + case grantPending = "GRANT_PENDING" + case granted = "GRANTED" + case revokeFailed = "REVOKE_FAILED" + case revokeInProgress = "REVOKE_IN_PROGRESS" + case revokePending = "REVOKE_PENDING" + case revoked = "REVOKED" + public var description: String { return self.rawValue } + } + + public enum SubscriptionRequestStatus: String, CustomStringConvertible, Codable, Sendable { + case accepted = "ACCEPTED" + case pending = "PENDING" + case rejected = "REJECTED" + public var description: String { return self.rawValue } + } + + public enum SubscriptionStatus: String, CustomStringConvertible, Codable, Sendable { + case approved = "APPROVED" + case cancelled = "CANCELLED" + case revoked = "REVOKED" + public var description: String { return self.rawValue } + } + + public enum TaskStatus: String, CustomStringConvertible, Codable, Sendable { + case active = "ACTIVE" + case inactive = "INACTIVE" + public var description: String { return self.rawValue } + } + + public enum Timezone: String, CustomStringConvertible, Codable, Sendable { + case africaJohannesburg = "AFRICA_JOHANNESBURG" + case americaMontreal = "AMERICA_MONTREAL" + case americaSaoPaulo = "AMERICA_SAO_PAULO" + case asiaBahrain = "ASIA_BAHRAIN" + case asiaBangkok = "ASIA_BANGKOK" + case asiaCalcutta = "ASIA_CALCUTTA" + case asiaDubai = "ASIA_DUBAI" + case asiaHongKong = "ASIA_HONG_KONG" + case asiaJakarta = "ASIA_JAKARTA" + case asiaKualaLumpur = "ASIA_KUALA_LUMPUR" + case asiaSeoul = "ASIA_SEOUL" + case asiaShanghai = "ASIA_SHANGHAI" + case asiaSingapore = 
"ASIA_SINGAPORE" + case asiaTaipei = "ASIA_TAIPEI" + case asiaTokyo = "ASIA_TOKYO" + case australiaMelbourne = "AUSTRALIA_MELBOURNE" + case australiaSydney = "AUSTRALIA_SYDNEY" + case canadaCentral = "CANADA_CENTRAL" + case cet = "CET" + case cst6cdt = "CST6CDT" + case etcGmt = "ETC_GMT" + case etcGmt0 = "ETC_GMT0" + case etcGmtAdd0 = "ETC_GMT_ADD_0" + case etcGmtAdd1 = "ETC_GMT_ADD_1" + case etcGmtAdd10 = "ETC_GMT_ADD_10" + case etcGmtAdd11 = "ETC_GMT_ADD_11" + case etcGmtAdd12 = "ETC_GMT_ADD_12" + case etcGmtAdd2 = "ETC_GMT_ADD_2" + case etcGmtAdd3 = "ETC_GMT_ADD_3" + case etcGmtAdd4 = "ETC_GMT_ADD_4" + case etcGmtAdd5 = "ETC_GMT_ADD_5" + case etcGmtAdd6 = "ETC_GMT_ADD_6" + case etcGmtAdd7 = "ETC_GMT_ADD_7" + case etcGmtAdd8 = "ETC_GMT_ADD_8" + case etcGmtAdd9 = "ETC_GMT_ADD_9" + case etcGmtNeg0 = "ETC_GMT_NEG_0" + case etcGmtNeg1 = "ETC_GMT_NEG_1" + case etcGmtNeg10 = "ETC_GMT_NEG_10" + case etcGmtNeg11 = "ETC_GMT_NEG_11" + case etcGmtNeg12 = "ETC_GMT_NEG_12" + case etcGmtNeg13 = "ETC_GMT_NEG_13" + case etcGmtNeg14 = "ETC_GMT_NEG_14" + case etcGmtNeg2 = "ETC_GMT_NEG_2" + case etcGmtNeg3 = "ETC_GMT_NEG_3" + case etcGmtNeg4 = "ETC_GMT_NEG_4" + case etcGmtNeg5 = "ETC_GMT_NEG_5" + case etcGmtNeg6 = "ETC_GMT_NEG_6" + case etcGmtNeg7 = "ETC_GMT_NEG_7" + case etcGmtNeg8 = "ETC_GMT_NEG_8" + case etcGmtNeg9 = "ETC_GMT_NEG_9" + case europeDublin = "EUROPE_DUBLIN" + case europeLondon = "EUROPE_LONDON" + case europeParis = "EUROPE_PARIS" + case europeStockholm = "EUROPE_STOCKHOLM" + case europeZurich = "EUROPE_ZURICH" + case israel = "ISRAEL" + case mexicoGeneral = "MEXICO_GENERAL" + case mst7mdt = "MST7MDT" + case pacificAuckland = "PACIFIC_AUCKLAND" + case usCentral = "US_CENTRAL" + case usEastern = "US_EASTERN" + case usMountain = "US_MOUNTAIN" + case usPacific = "US_PACIFIC" + case utc = "UTC" + public var description: String { return self.rawValue } + } + + public enum TypesSearchScope: String, CustomStringConvertible, Codable, Sendable { + case assetType = "ASSET_TYPE" + case formType = "FORM_TYPE" + public var description: String { return self.rawValue } + } + + public enum UserAssignment: String, CustomStringConvertible, Codable, Sendable { + case automatic = "AUTOMATIC" + case manual = "MANUAL" + public var description: String { return self.rawValue } + } + + public enum UserDesignation: String, CustomStringConvertible, Codable, Sendable { + case projectContributor = "PROJECT_CONTRIBUTOR" + case projectOwner = "PROJECT_OWNER" + public var description: String { return self.rawValue } + } + + public enum UserProfileStatus: String, CustomStringConvertible, Codable, Sendable { + case activated = "ACTIVATED" + case assigned = "ASSIGNED" + case deactivated = "DEACTIVATED" + case notAssigned = "NOT_ASSIGNED" + public var description: String { return self.rawValue } + } + + public enum UserProfileType: String, CustomStringConvertible, Codable, Sendable { + case iam = "IAM" + case sso = "SSO" + public var description: String { return self.rawValue } + } + + public enum UserSearchType: String, CustomStringConvertible, Codable, Sendable { + case datazoneIamUser = "DATAZONE_IAM_USER" + case datazoneSsoUser = "DATAZONE_SSO_USER" + case datazoneUser = "DATAZONE_USER" + case ssoUser = "SSO_USER" + public var description: String { return self.rawValue } + } + + public enum UserType: String, CustomStringConvertible, Codable, Sendable { + case iamRole = "IAM_ROLE" + case iamUser = "IAM_USER" + case ssoUser = "SSO_USER" + public var description: String { return self.rawValue } + } + + public enum 
DataSourceConfigurationInput: AWSEncodableShape, Sendable { + /// The configuration of the Amazon Web Services Glue data source. + case glueRunConfiguration(GlueRunConfigurationInput) + /// The configuration of the Amazon Redshift data source. + case redshiftRunConfiguration(RedshiftRunConfigurationInput) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .glueRunConfiguration(let value): + try container.encode(value, forKey: .glueRunConfiguration) + case .redshiftRunConfiguration(let value): + try container.encode(value, forKey: .redshiftRunConfiguration) + } + } + + private enum CodingKeys: String, CodingKey { + case glueRunConfiguration = "glueRunConfiguration" + case redshiftRunConfiguration = "redshiftRunConfiguration" + } + } + + public enum DataSourceConfigurationOutput: AWSDecodableShape, Sendable { + /// The configuration of the Amazon Web Services Glue data source. + case glueRunConfiguration(GlueRunConfigurationOutput) + /// The configuration of the Amazon Redshift data source. + case redshiftRunConfiguration(RedshiftRunConfigurationOutput) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .glueRunConfiguration: + let value = try container.decode(GlueRunConfigurationOutput.self, forKey: .glueRunConfiguration) + self = .glueRunConfiguration(value) + case .redshiftRunConfiguration: + let value = try container.decode(RedshiftRunConfigurationOutput.self, forKey: .redshiftRunConfiguration) + self = .redshiftRunConfiguration(value) + } + } + + private enum CodingKeys: String, CodingKey { + case glueRunConfiguration = "glueRunConfiguration" + case redshiftRunConfiguration = "redshiftRunConfiguration" + } + } + + public enum FilterClause: AWSEncodableShape, Sendable { + /// The 'and' search filter clause in Amazon DataZone. + case and([FilterClause]) + /// A search filter in Amazon DataZone. + case filter(Filter) + /// The 'or' search filter clause in Amazon DataZone. + case or([FilterClause]) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .and(let value): + try container.encode(value, forKey: .and) + case .filter(let value): + try container.encode(value, forKey: .filter) + case .or(let value): + try container.encode(value, forKey: .or) + } + } + + public func validate(name: String) throws { + switch self { + case .and(let value): + try value.forEach { + try $0.validate(name: "\(name).and[]") + } + try self.validate(value, name: "and", parent: name, max: 100) + try self.validate(value, name: "and", parent: name, min: 1) + case .filter(let value): + try value.validate(name: "\(name).filter") + case .or(let value): + try value.forEach { + try $0.validate(name: "\(name).or[]") + } + try self.validate(value, name: "or", parent: name, max: 100) + try self.validate(value, name: "or", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case and = "and" + case filter = "filter" + case or = "or" + } + } + + public enum Member: AWSEncodableShape, Sendable { + /// The ID of the group of a project member. 
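// Illustrative usage sketch (editorial note, not part of the generated patch).
// FilterClause is a recursive single-case union: exactly one of `and`, `or` or
// `filter` is set, and encoding produces a one-key JSON object whose arrays may nest
// further clauses. The Filter(attribute:value:) initializer used below is an
// assumption about the Filter shape declared elsewhere in this patch.
let clause: DataZone.FilterClause = .and([
    .filter(.init(attribute: "typeName", value: "GlueTableAssetType")),
    .or([
        .filter(.init(attribute: "owningProjectId", value: "prj_example_a")),
        .filter(.init(attribute: "owningProjectId", value: "prj_example_b"))
    ])
])
// validate(name:) recurses into nested clauses and enforces the 1...100 element bounds.
try clause.validate(name: "searchInput.filters")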
+ case groupIdentifier(String) + /// The user ID of a project member. + case userIdentifier(String) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .groupIdentifier(let value): + try container.encode(value, forKey: .groupIdentifier) + case .userIdentifier(let value): + try container.encode(value, forKey: .userIdentifier) + } + } + + private enum CodingKeys: String, CodingKey { + case groupIdentifier = "groupIdentifier" + case userIdentifier = "userIdentifier" + } + } + + public enum MemberDetails: AWSDecodableShape, Sendable { + /// The group details of a project member. + case group(GroupDetails) + /// The user details of a project member. + case user(UserDetails) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .group: + let value = try container.decode(GroupDetails.self, forKey: .group) + self = .group(value) + case .user: + let value = try container.decode(UserDetails.self, forKey: .user) + self = .user(value) + } + } + + private enum CodingKeys: String, CodingKey { + case group = "group" + case user = "user" + } + } + + public enum RedshiftStorage: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The details of the Amazon Redshift cluster source. + case redshiftClusterSource(RedshiftClusterStorage) + /// The details of the Amazon Redshift Serverless workgroup source. + case redshiftServerlessSource(RedshiftServerlessStorage) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .redshiftClusterSource: + let value = try container.decode(RedshiftClusterStorage.self, forKey: .redshiftClusterSource) + self = .redshiftClusterSource(value) + case .redshiftServerlessSource: + let value = try container.decode(RedshiftServerlessStorage.self, forKey: .redshiftServerlessSource) + self = .redshiftServerlessSource(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .redshiftClusterSource(let value): + try container.encode(value, forKey: .redshiftClusterSource) + case .redshiftServerlessSource(let value): + try container.encode(value, forKey: .redshiftServerlessSource) + } + } + + private enum CodingKeys: String, CodingKey { + case redshiftClusterSource = "redshiftClusterSource" + case redshiftServerlessSource = "redshiftServerlessSource" + } + } + + public enum SearchInventoryResultItem: AWSDecodableShape, Sendable { + /// The asset item included in the search results. + case assetItem(AssetItem) + /// The data product item included in the search results. + case dataProductItem(DataProductSummary) + /// The glossary item included in the search results. + case glossaryItem(GlossaryItem) + /// The glossary term item included in the search results. 
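// Illustrative usage sketch (editorial note, not part of the generated patch).
// Each of these unions travels as a JSON object with exactly one key, matching its
// CodingKeys; the decode-only counterparts enforce that shape with the
// `allKeys.count == 1` guard above. Encoding a Member with JSONEncoder here is only
// to show the wire format; Soto's own serializer handles this for real requests.
import Foundation

let member = DataZone.Member.userIdentifier("3f8d2e10-example-user")
let encoded = try JSONEncoder().encode(member)
print(String(decoding: encoded, as: UTF8.self)) // {"userIdentifier":"3f8d2e10-example-user"}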
+ case glossaryTermItem(GlossaryTermItem) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .assetItem: + let value = try container.decode(AssetItem.self, forKey: .assetItem) + self = .assetItem(value) + case .dataProductItem: + let value = try container.decode(DataProductSummary.self, forKey: .dataProductItem) + self = .dataProductItem(value) + case .glossaryItem: + let value = try container.decode(GlossaryItem.self, forKey: .glossaryItem) + self = .glossaryItem(value) + case .glossaryTermItem: + let value = try container.decode(GlossaryTermItem.self, forKey: .glossaryTermItem) + self = .glossaryTermItem(value) + } + } + + private enum CodingKeys: String, CodingKey { + case assetItem = "assetItem" + case dataProductItem = "dataProductItem" + case glossaryItem = "glossaryItem" + case glossaryTermItem = "glossaryTermItem" + } + } + + public enum SearchTypesResultItem: AWSDecodableShape, Sendable { + /// The asset type included in the results of the SearchTypes action. + case assetTypeItem(AssetTypeItem) + /// The form type included in the results of the SearchTypes action. + case formTypeItem(FormTypeData) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .assetTypeItem: + let value = try container.decode(AssetTypeItem.self, forKey: .assetTypeItem) + self = .assetTypeItem(value) + case .formTypeItem: + let value = try container.decode(FormTypeData.self, forKey: .formTypeItem) + self = .formTypeItem(value) + } + } + + private enum CodingKeys: String, CodingKey { + case assetTypeItem = "assetTypeItem" + case formTypeItem = "formTypeItem" + } + } + + public enum UserProfileDetails: AWSDecodableShape, Sendable { + /// The IAM details included in the user profile details. + case iam(IamUserProfileDetails) + /// The single sign-on details included in the user profile details. + case sso(SsoUserProfileDetails) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .iam: + let value = try container.decode(IamUserProfileDetails.self, forKey: .iam) + self = .iam(value) + case .sso: + let value = try container.decode(SsoUserProfileDetails.self, forKey: .sso) + self = .sso(value) + } + } + + private enum CodingKeys: String, CodingKey { + case iam = "iam" + case sso = "sso" + } + } + + // MARK: Shapes + + public struct AcceptChoice: AWSEncodableShape { + /// Specifies the prediction (aka, the automatically generated piece of metadata) that can be accepted. + public let predictionChoice: Int? 
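// Illustrative usage sketch (editorial note, not part of the generated patch).
// Decode-only unions such as SearchInventoryResultItem are consumed by switching over
// whichever single case was populated. The `items` property on SearchOutput is an
// assumption about a shape declared elsewhere in this patch; the AssetItem fields used
// here match the AssetItem declaration later in this file.
func printSearchResults(_ output: DataZone.SearchOutput) {
    for item in output.items ?? [] {
        switch item {
        case .assetItem(let asset):
            print("asset \(asset.name) owned by project \(asset.owningProjectId)")
        case .dataProductItem:
            print("data product")
        case .glossaryItem:
            print("glossary")
        case .glossaryTermItem:
            print("glossary term")
        }
    }
}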
+ /// Specifies the target (for example, a column name) where a prediction can be accepted. + public let predictionTarget: String? + + public init(predictionChoice: Int? = nil, predictionTarget: String? = nil) { + self.predictionChoice = predictionChoice + self.predictionTarget = predictionTarget + } + + private enum CodingKeys: String, CodingKey { + case predictionChoice = "predictionChoice" + case predictionTarget = "predictionTarget" + } + } + + public struct AcceptPredictionsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "revision", location: .querystring("revision")) + ] + + public let acceptChoices: [AcceptChoice]? + /// Specifies the rule (or the conditions) under which a prediction can be accepted. + public let acceptRule: AcceptRule? + /// A unique, case-sensitive identifier to ensure idempotency of the request. This field is automatically populated if not provided. + public let clientToken: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + public let identifier: String + public let revision: String? + + public init(acceptChoices: [AcceptChoice]? = nil, acceptRule: AcceptRule? = nil, clientToken: String? = AcceptPredictionsInput.idempotencyToken(), domainIdentifier: String, identifier: String, revision: String? = nil) { + self.acceptChoices = acceptChoices + self.acceptRule = acceptRule + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case acceptChoices = "acceptChoices" + case acceptRule = "acceptRule" + case clientToken = "clientToken" + } + } + + public struct AcceptPredictionsOutput: AWSDecodableShape { + public let assetId: String + public let domainId: String + public let revision: String + + public init(assetId: String, domainId: String, revision: String) { + self.assetId = assetId + self.domainId = domainId + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case domainId = "domainId" + case revision = "revision" + } + } + + public struct AcceptRule: AWSEncodableShape { + /// Specifies whether you want to accept the top prediction for all targets or none. + public let rule: AcceptRuleBehavior? + /// The confidence score that specifies the condition at which a prediction can be accepted. + public let threshold: Float? + + public init(rule: AcceptRuleBehavior? = nil, threshold: Float? 
= nil) {
+ self.rule = rule
+ self.threshold = threshold
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case rule = "rule"
+ case threshold = "threshold"
+ }
+ }
+
+ public struct AcceptSubscriptionRequestInput: AWSEncodableShape {
+ public static var _encoding = [
+ AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+ AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+ ]
+
+ /// A description that specifies the reason for accepting the specified subscription request.
+ public let decisionComment: String?
+ /// The Amazon DataZone domain where the specified subscription request is being accepted.
+ public let domainIdentifier: String
+ /// The unique identifier of the subscription request that is to be accepted.
+ public let identifier: String
+
+ public init(decisionComment: String? = nil, domainIdentifier: String, identifier: String) {
+ self.decisionComment = decisionComment
+ self.domainIdentifier = domainIdentifier
+ self.identifier = identifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.decisionComment, name: "decisionComment", parent: name, max: 4096)
+ try self.validate(self.decisionComment, name: "decisionComment", parent: name, min: 1)
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case decisionComment = "decisionComment"
+ }
+ }
+
+ public struct AcceptSubscriptionRequestOutput: AWSDecodableShape {
+ /// The timestamp that specifies when the subscription request was accepted.
+ public let createdAt: Date
+ /// Specifies the Amazon DataZone user that accepted the specified subscription request.
+ public let createdBy: String
+ /// Specifies the reason for accepting the subscription request.
+ public let decisionComment: String?
+ /// The unique identifier of the Amazon DataZone domain where the specified subscription request was accepted.
+ public let domainId: String
+ /// The identifier of the subscription request.
+ public let id: String
+ /// Specifies the reason for requesting a subscription to the asset.
+ public let requestReason: String
+ /// Specifies the ID of the Amazon DataZone user who reviewed the subscription request.
+ public let reviewerId: String?
+ /// Specifies the status of the subscription request.
+ public let status: SubscriptionRequestStatus
+ /// Specifies the asset for which the subscription request was created.
+ public let subscribedListings: [SubscribedListing]
+ /// Specifies the Amazon DataZone users who are subscribed to the asset specified in the subscription request.
+ public let subscribedPrincipals: [SubscribedPrincipal]
+ /// Specifies the timestamp when the subscription request was updated.
+ public let updatedAt: Date
+ /// Specifies the Amazon DataZone user who updated the subscription request.
+ public let updatedBy: String?
+
+ public init(createdAt: Date, createdBy: String, decisionComment: String? = nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String?
= nil) {
+ self.createdAt = createdAt
+ self.createdBy = createdBy
+ self.decisionComment = decisionComment
+ self.domainId = domainId
+ self.id = id
+ self.requestReason = requestReason
+ self.reviewerId = reviewerId
+ self.status = status
+ self.subscribedListings = subscribedListings
+ self.subscribedPrincipals = subscribedPrincipals
+ self.updatedAt = updatedAt
+ self.updatedBy = updatedBy
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case createdAt = "createdAt"
+ case createdBy = "createdBy"
+ case decisionComment = "decisionComment"
+ case domainId = "domainId"
+ case id = "id"
+ case requestReason = "requestReason"
+ case reviewerId = "reviewerId"
+ case status = "status"
+ case subscribedListings = "subscribedListings"
+ case subscribedPrincipals = "subscribedPrincipals"
+ case updatedAt = "updatedAt"
+ case updatedBy = "updatedBy"
+ }
+ }
+
+ public struct AssetItem: AWSDecodableShape {
+ /// The additional attributes of an Amazon DataZone inventory asset.
+ public let additionalAttributes: AssetItemAdditionalAttributes?
+ /// The timestamp of when the Amazon DataZone inventory asset was created.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who created the inventory asset.
+ public let createdBy: String?
+ /// The description of an Amazon DataZone inventory asset.
+ public let description: String?
+ /// The identifier of the Amazon DataZone domain in which the inventory asset exists.
+ public let domainId: String
+ /// The external identifier of the Amazon DataZone inventory asset.
+ public let externalIdentifier: String?
+ /// The timestamp of when the first revision of the inventory asset was created.
+ public let firstRevisionCreatedAt: Date?
+ /// The Amazon DataZone user who created the first revision of the inventory asset.
+ public let firstRevisionCreatedBy: String?
+ /// The glossary terms attached to the Amazon DataZone inventory asset.
+ public let glossaryTerms: [String]?
+ /// The identifier of the Amazon DataZone inventory asset.
+ public let identifier: String
+ /// The name of the Amazon DataZone inventory asset.
+ public let name: String
+ /// The identifier of the Amazon DataZone project that owns the inventory asset.
+ public let owningProjectId: String
+ /// The identifier of the asset type of the specified Amazon DataZone inventory asset.
+ public let typeIdentifier: String
+ /// The revision of the inventory asset type.
+ public let typeRevision: String
+
+ public init(additionalAttributes: AssetItemAdditionalAttributes? = nil, createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, glossaryTerms: [String]?
= nil, identifier: String, name: String, owningProjectId: String, typeIdentifier: String, typeRevision: String) { + self.additionalAttributes = additionalAttributes + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.externalIdentifier = externalIdentifier + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy + self.glossaryTerms = glossaryTerms + self.identifier = identifier + self.name = name + self.owningProjectId = owningProjectId + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + private enum CodingKeys: String, CodingKey { + case additionalAttributes = "additionalAttributes" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case externalIdentifier = "externalIdentifier" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" + case glossaryTerms = "glossaryTerms" + case identifier = "identifier" + case name = "name" + case owningProjectId = "owningProjectId" + case typeIdentifier = "typeIdentifier" + case typeRevision = "typeRevision" + } + } + + public struct AssetItemAdditionalAttributes: AWSDecodableShape { + /// The forms included in the additional attributes of an inventory asset. + public let formsOutput: [FormOutput]? + /// The read-only forms included in the additional attributes of an inventory asset. + public let readOnlyFormsOutput: [FormOutput]? + + public init(formsOutput: [FormOutput]? = nil, readOnlyFormsOutput: [FormOutput]? = nil) { + self.formsOutput = formsOutput + self.readOnlyFormsOutput = readOnlyFormsOutput + } + + private enum CodingKeys: String, CodingKey { + case formsOutput = "formsOutput" + case readOnlyFormsOutput = "readOnlyFormsOutput" + } + } + + public struct AssetListing: AWSDecodableShape { + /// The identifier of an asset published in an Amazon DataZone catalog. + public let assetId: String? + /// The revision of an asset published in an Amazon DataZone catalog. + public let assetRevision: String? + /// The type of an asset published in an Amazon DataZone catalog. + public let assetType: String? + /// The timestamp of when an asset published in an Amazon DataZone catalog was created. + public let createdAt: Date? + /// The metadata forms attached to an asset published in an Amazon DataZone catalog. + public let forms: String? + /// The glossary terms attached to an asset published in an Amazon DataZone catalog. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The identifier of the project where an asset published in an Amazon DataZone catalog exists. + public let owningProjectId: String? + + public init(assetId: String? = nil, assetRevision: String? = nil, assetType: String? = nil, createdAt: Date? = nil, forms: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? = nil, owningProjectId: String? 
= nil) { + self.assetId = assetId + self.assetRevision = assetRevision + self.assetType = assetType + self.createdAt = createdAt + self.forms = forms + self.glossaryTerms = glossaryTerms + self.owningProjectId = owningProjectId + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case assetRevision = "assetRevision" + case assetType = "assetType" + case createdAt = "createdAt" + case forms = "forms" + case glossaryTerms = "glossaryTerms" + case owningProjectId = "owningProjectId" + } + } + + public struct AssetListingDetails: AWSDecodableShape { + /// The identifier of an asset published in an Amazon DataZone catalog. + public let listingId: String + /// The status of an asset published in an Amazon DataZone catalog. + public let listingStatus: ListingStatus + + public init(listingId: String, listingStatus: ListingStatus) { + self.listingId = listingId + self.listingStatus = listingStatus + } + + private enum CodingKeys: String, CodingKey { + case listingId = "listingId" + case listingStatus = "listingStatus" + } + } + + public struct AssetListingItem: AWSDecodableShape { + /// The additional attributes of an asset published in an Amazon DataZone catalog. + public let additionalAttributes: AssetListingItemAdditionalAttributes? + /// The timestamp of when an asset published in an Amazon DataZone catalog was created. + public let createdAt: Date? + /// The description of an asset published in an Amazon DataZone catalog. + public let description: String? + /// The identifier of the inventory asset. + public let entityId: String? + /// The revision of the inventory asset. + public let entityRevision: String? + /// The type of the inventory asset. + public let entityType: String? + /// Glossary terms attached to the inventory asset. + public let glossaryTerms: [DetailedGlossaryTerm]? + /// The Amazon DataZone user who created the listing. + public let listingCreatedBy: String? + /// The identifier of the listing (asset published in Amazon DataZone catalog). + public let listingId: String? + /// The revision of the listing (asset published in Amazon DataZone catalog). + public let listingRevision: String? + /// The Amazon DataZone user who updated the listing. + public let listingUpdatedBy: String? + /// The name of the inventory asset. + public let name: String? + /// The identifier of the project that owns the inventory asset. + public let owningProjectId: String? + + public init(additionalAttributes: AssetListingItemAdditionalAttributes? = nil, createdAt: Date? = nil, description: String? = nil, entityId: String? = nil, entityRevision: String? = nil, entityType: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? = nil, listingCreatedBy: String? = nil, listingId: String? = nil, listingRevision: String? = nil, listingUpdatedBy: String? = nil, name: String? = nil, owningProjectId: String? 
= nil) {
+ self.additionalAttributes = additionalAttributes
+ self.createdAt = createdAt
+ self.description = description
+ self.entityId = entityId
+ self.entityRevision = entityRevision
+ self.entityType = entityType
+ self.glossaryTerms = glossaryTerms
+ self.listingCreatedBy = listingCreatedBy
+ self.listingId = listingId
+ self.listingRevision = listingRevision
+ self.listingUpdatedBy = listingUpdatedBy
+ self.name = name
+ self.owningProjectId = owningProjectId
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case additionalAttributes = "additionalAttributes"
+ case createdAt = "createdAt"
+ case description = "description"
+ case entityId = "entityId"
+ case entityRevision = "entityRevision"
+ case entityType = "entityType"
+ case glossaryTerms = "glossaryTerms"
+ case listingCreatedBy = "listingCreatedBy"
+ case listingId = "listingId"
+ case listingRevision = "listingRevision"
+ case listingUpdatedBy = "listingUpdatedBy"
+ case name = "name"
+ case owningProjectId = "owningProjectId"
+ }
+ }
+
+ public struct AssetListingItemAdditionalAttributes: AWSDecodableShape {
+ /// The metadata forms that form additional attributes of the metadata asset.
+ public let forms: String?
+
+ public init(forms: String? = nil) {
+ self.forms = forms
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case forms = "forms"
+ }
+ }
+
+ public struct AssetRevision: AWSDecodableShape {
+ /// The timestamp of when an inventory asset revision was created.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who created the asset revision.
+ public let createdBy: String?
+ /// The identifier of the Amazon DataZone domain in which the inventory asset exists.
+ public let domainId: String?
+ /// The identifier of the inventory asset revision.
+ public let id: String?
+ /// The revision details of the inventory asset.
+ public let revision: String?
+
+ public init(createdAt: Date? = nil, createdBy: String? = nil, domainId: String? = nil, id: String? = nil, revision: String? = nil) {
+ self.createdAt = createdAt
+ self.createdBy = createdBy
+ self.domainId = domainId
+ self.id = id
+ self.revision = revision
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case createdAt = "createdAt"
+ case createdBy = "createdBy"
+ case domainId = "domainId"
+ case id = "id"
+ case revision = "revision"
+ }
+ }
+
+ public struct AssetTargetNameMap: AWSEncodableShape {
+ /// The identifier of the inventory asset.
+ public let assetId: String
+ /// The target name in the asset target name map.
+ public let targetName: String
+
+ public init(assetId: String, targetName: String) {
+ self.assetId = assetId
+ self.targetName = targetName
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.assetId, name: "assetId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case assetId = "assetId"
+ case targetName = "targetName"
+ }
+ }
+
+ public struct AssetTypeItem: AWSDecodableShape {
+ /// The timestamp of when the asset type was created.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who created the asset type.
+ public let createdBy: String?
+ /// The description of the asset type.
+ public let description: String?
+ /// The identifier of the Amazon DataZone domain where the asset type exists.
+ public let domainId: String
+ /// The forms included in the details of the asset type.
+ public let formsOutput: [String: FormEntryOutput]
+ /// The name of the asset type.
+ public let name: String + /// The identifier of the Amazon DataZone domain where the asset type was originally created. + public let originDomainId: String? + /// The identifier of the Amazon DataZone project where the asset type exists. + public let originProjectId: String? + /// The identifier of the Amazon DataZone project that owns the asset type. + public let owningProjectId: String + /// The revision of the asset type. + public let revision: String + /// The timestamp of when the asset type was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the asset type. + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String, revision: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.formsOutput = formsOutput + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct BusinessNameGenerationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether the business name generation is enabled. + public let enabled: Bool? + + public init(enabled: Bool? = nil) { + self.enabled = enabled + } + + private enum CodingKeys: String, CodingKey { + case enabled = "enabled" + } + } + + public struct CancelSubscriptionInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The unique identifier of the Amazon DataZone domain where the subscription request is being cancelled. + public let domainIdentifier: String + /// The unique identifier of the subscription that is being cancelled. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct CancelSubscriptionOutput: AWSDecodableShape { + /// The timestamp that specifies when the request to cancel the subscription was created. + public let createdAt: Date + /// Specifies the Amazon DataZone user who is cancelling the subscription. + public let createdBy: String + /// The unique identifier of the Amazon DataZone domain where the subscription is being cancelled. 
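The request shapes above bind domainIdentifier and identifier to the URI through _encoding and check the identifier patterns client-side in validate(name:). A small sketch of that pre-flight check, using made-up identifiers:

import SotoDataZone

let cancel = DataZone.CancelSubscriptionInput(
    domainIdentifier: "dzd_1234567890abc",  // must match ^dzd[-_][a-zA-Z0-9_-]{1,36}$
    identifier: "sub-12345"                 // must match ^[a-zA-Z0-9_-]{1,36}$
)
do {
    try cancel.validate(name: "CancelSubscriptionInput")
} catch {
    print("client-side validation failed:", error)
}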
+ public let domainId: String
+ /// The identifier of the subscription.
+ public let id: String
+ /// Specifies whether the permissions to the asset are retained after the subscription is cancelled.
+ public let retainPermissions: Bool?
+ /// The status of the request to cancel the subscription.
+ public let status: SubscriptionStatus
+ /// The asset to which a subscription is being cancelled.
+ public let subscribedListing: SubscribedListing
+ /// The Amazon DataZone user who is made a subscriber to the specified asset by the subscription that is being cancelled.
+ public let subscribedPrincipal: SubscribedPrincipal
+ /// The unique ID of the subscription request for the subscription that is being cancelled.
+ public let subscriptionRequestId: String?
+ /// The timestamp that specifies when the subscription was cancelled.
+ public let updatedAt: Date
+ /// The Amazon DataZone user that cancelled the subscription.
+ public let updatedBy: String?
+
+ public init(createdAt: Date, createdBy: String, domainId: String, id: String, retainPermissions: Bool? = nil, status: SubscriptionStatus, subscribedListing: SubscribedListing, subscribedPrincipal: SubscribedPrincipal, subscriptionRequestId: String? = nil, updatedAt: Date, updatedBy: String? = nil) {
+ self.createdAt = createdAt
+ self.createdBy = createdBy
+ self.domainId = domainId
+ self.id = id
+ self.retainPermissions = retainPermissions
+ self.status = status
+ self.subscribedListing = subscribedListing
+ self.subscribedPrincipal = subscribedPrincipal
+ self.subscriptionRequestId = subscriptionRequestId
+ self.updatedAt = updatedAt
+ self.updatedBy = updatedBy
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case createdAt = "createdAt"
+ case createdBy = "createdBy"
+ case domainId = "domainId"
+ case id = "id"
+ case retainPermissions = "retainPermissions"
+ case status = "status"
+ case subscribedListing = "subscribedListing"
+ case subscribedPrincipal = "subscribedPrincipal"
+ case subscriptionRequestId = "subscriptionRequestId"
+ case updatedAt = "updatedAt"
+ case updatedBy = "updatedBy"
+ }
+ }
+
+ public struct CloudFormationProperties: AWSDecodableShape {
+ /// The template URL of the CloudFormation provisioning properties of the environment blueprint.
+ public let templateUrl: String
+
+ public init(templateUrl: String) {
+ self.templateUrl = templateUrl
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case templateUrl = "templateUrl"
+ }
+ }
+
+ public struct ConfigurableActionParameter: AWSDecodableShape {
+ /// The key of the configurable action parameter.
+ public let key: String?
+ /// The value of the configurable action parameter.
+ public let value: String?
+
+ public init(key: String? = nil, value: String? = nil) {
+ self.key = key
+ self.value = value
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case key = "key"
+ case value = "value"
+ }
+ }
+
+ public struct ConfigurableEnvironmentAction: AWSDecodableShape {
+ /// The authentication type of a configurable action of an Amazon DataZone environment.
+ public let auth: ConfigurableActionTypeAuthorization?
+ /// The parameters of a configurable action in an Amazon DataZone environment.
+ public let parameters: [ConfigurableActionParameter]
+ /// The type of a configurable action in an Amazon DataZone environment.
+ public let type: String
+
+ public init(auth: ConfigurableActionTypeAuthorization?
= nil, parameters: [ConfigurableActionParameter], type: String) { + self.auth = auth + self.parameters = parameters + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case auth = "auth" + case parameters = "parameters" + case type = "type" + } + } + + public struct CreateAssetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// Asset description. + public let description: String? + /// Amazon DataZone domain where the asset is created. + public let domainIdentifier: String + public let externalIdentifier: String? + /// Metadata forms attached to the asset. + public let formsInput: [FormInput]? + /// Glossary terms attached to the asset. + public let glossaryTerms: [String]? + /// Asset name. + public let name: String + /// The unique identifier of the project that owns this asset. + public let owningProjectIdentifier: String + /// The configuration of the automatically generated business-friendly metadata for the asset. + public let predictionConfiguration: PredictionConfiguration? + /// The unique identifier of this asset's type. + public let typeIdentifier: String + /// The revision of this asset's type. + public let typeRevision: String? + + public init(clientToken: String? = CreateAssetInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, externalIdentifier: String? = nil, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, name: String, owningProjectIdentifier: String, predictionConfiguration: PredictionConfiguration? = nil, typeIdentifier: String, typeRevision: String? 
= nil) { + self.clientToken = clientToken + self.description = description + self.domainIdentifier = domainIdentifier + self.externalIdentifier = externalIdentifier + self.formsInput = formsInput + self.glossaryTerms = glossaryTerms + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + self.predictionConfiguration = predictionConfiguration + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, max: 256) + try self.validate(self.externalIdentifier, name: "externalIdentifier", parent: name, min: 1) + try self.formsInput?.forEach { + try $0.validate(name: "\(name).formsInput[]") + } + try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10) + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, max: 385) + try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, min: 1) + try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$") + try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64) + try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case externalIdentifier = "externalIdentifier" + case formsInput = "formsInput" + case glossaryTerms = "glossaryTerms" + case name = "name" + case owningProjectIdentifier = "owningProjectIdentifier" + case predictionConfiguration = "predictionConfiguration" + case typeIdentifier = "typeIdentifier" + case typeRevision = "typeRevision" + } + } + + public struct CreateAssetOutput: AWSDecodableShape { + /// The timestamp of when the asset was created. + public let createdAt: Date? + /// The Amazon DataZone user that created this asset in the catalog. + public let createdBy: String? + /// The description of the created asset. + public let description: String? + /// The ID of the Amazon DataZone domain in which the asset was created. + public let domainId: String + public let externalIdentifier: String? + /// The timestamp of when the first revision of the asset took place. + public let firstRevisionCreatedAt: Date? + /// The Amazon DataZone user that made the first revision of the asset. + public let firstRevisionCreatedBy: String? 
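CreateAssetInput above defaults clientToken to an idempotency token and enforces the limits in validate (at most 10 formsInput entries, 1 to 20 glossaryTerms, a 256-character name). A construction sketch with made-up identifiers; the typeIdentifier value and the createAsset client call are assumptions, since neither appears in this hunk:

import SotoDataZone

let assetInput = DataZone.CreateAssetInput(
    description: "Daily sales extract",
    domainIdentifier: "dzd_1234567890abc",
    name: "sales_daily",
    owningProjectIdentifier: "proj1234",
    typeIdentifier: "amazon.datazone.RelationalTable"  // assumed asset type name
)
// clientToken was defaulted via CreateAssetInput.idempotencyToken(), so a retried call is idempotent.
try assetInput.validate(name: "CreateAssetInput")
// let created = try await dataZone.createAsset(assetInput)  // assumed generated client method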
+ /// The metadata forms that are attached to the created asset. + public let formsOutput: [FormOutput] + /// The glossary terms that are attached to the created asset. + public let glossaryTerms: [String]? + /// The unique identifier of the created asset. + public let id: String + public let listing: AssetListingDetails? + /// The name of the created asset. + public let name: String + /// The ID of the Amazon DataZone project that owns the created asset. + public let owningProjectId: String + /// The configuration of the automatically generated business-friendly metadata for the asset. + public let predictionConfiguration: PredictionConfiguration? + /// The read-only metadata forms that are attached to the created asset. + public let readOnlyFormsOutput: [FormOutput]? + /// The revision of the asset. + public let revision: String + /// The identifier of the created asset type. + public let typeIdentifier: String + /// The revision type of the asset. + public let typeRevision: String + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput], glossaryTerms: [String]? = nil, id: String, listing: AssetListingDetails? = nil, name: String, owningProjectId: String, predictionConfiguration: PredictionConfiguration? = nil, readOnlyFormsOutput: [FormOutput]? = nil, revision: String, typeIdentifier: String, typeRevision: String) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.externalIdentifier = externalIdentifier + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy + self.formsOutput = formsOutput + self.glossaryTerms = glossaryTerms + self.id = id + self.listing = listing + self.name = name + self.owningProjectId = owningProjectId + self.predictionConfiguration = predictionConfiguration + self.readOnlyFormsOutput = readOnlyFormsOutput + self.revision = revision + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case externalIdentifier = "externalIdentifier" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" + case formsOutput = "formsOutput" + case glossaryTerms = "glossaryTerms" + case id = "id" + case listing = "listing" + case name = "name" + case owningProjectId = "owningProjectId" + case predictionConfiguration = "predictionConfiguration" + case readOnlyFormsOutput = "readOnlyFormsOutput" + case revision = "revision" + case typeIdentifier = "typeIdentifier" + case typeRevision = "typeRevision" + } + } + + public struct CreateAssetRevisionInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The revised description of the asset. + public let description: String? + /// The unique identifier of the domain where the asset is being revised. 
+ public let domainIdentifier: String
+ /// The metadata forms to be attached to the asset as part of asset revision.
+ public let formsInput: [FormInput]?
+ /// The glossary terms to be attached to the asset as part of asset revision.
+ public let glossaryTerms: [String]?
+ /// The identifier of the asset.
+ public let identifier: String
+ /// The revised name of the asset.
+ public let name: String
+ /// The configuration of the automatically generated business-friendly metadata for the asset.
+ public let predictionConfiguration: PredictionConfiguration?
+ /// The revision type of the asset.
+ public let typeRevision: String?
+
+ public init(clientToken: String? = CreateAssetRevisionInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, formsInput: [FormInput]? = nil, glossaryTerms: [String]? = nil, identifier: String, name: String, predictionConfiguration: PredictionConfiguration? = nil, typeRevision: String? = nil) {
+ self.clientToken = clientToken
+ self.description = description
+ self.domainIdentifier = domainIdentifier
+ self.formsInput = formsInput
+ self.glossaryTerms = glossaryTerms
+ self.identifier = identifier
+ self.name = name
+ self.predictionConfiguration = predictionConfiguration
+ self.typeRevision = typeRevision
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128)
+ try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1)
+ try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$")
+ try self.validate(self.description, name: "description", parent: name, max: 2048)
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.formsInput?.forEach {
+ try $0.validate(name: "\(name).formsInput[]")
+ }
+ try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10)
+ try self.glossaryTerms?.forEach {
+ try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+ try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20)
+ try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1)
+ try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.name, name: "name", parent: name, max: 256)
+ try self.validate(self.name, name: "name", parent: name, min: 1)
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64)
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1)
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case clientToken = "clientToken"
+ case description = "description"
+ case formsInput = "formsInput"
+ case glossaryTerms = "glossaryTerms"
+ case name = "name"
+ case predictionConfiguration = "predictionConfiguration"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct CreateAssetRevisionOutput: AWSDecodableShape {
+ /// The timestamp of when the asset revision occurred.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who performed the asset revision.
+ public let createdBy: String?
+ /// The revised asset description.
+ public let description: String?
+ /// The unique identifier of the Amazon DataZone domain where the asset was revised.
+ public let domainId: String
+ public let externalIdentifier: String?
+ /// The timestamp of when the first asset revision occurred.
+ public let firstRevisionCreatedAt: Date?
+ /// The Amazon DataZone user who performed the first asset revision.
+ public let firstRevisionCreatedBy: String?
+ /// The metadata forms that were attached to the asset as part of the asset revision.
+ public let formsOutput: [FormOutput]
+ /// The glossary terms that were attached to the asset as part of asset revision.
+ public let glossaryTerms: [String]?
+ /// The unique identifier of the asset revision.
+ public let id: String
+ public let listing: AssetListingDetails?
+ /// The revised name of the asset.
+ public let name: String
+ /// The unique identifier of the revised project that owns the asset.
+ public let owningProjectId: String
+ /// The configuration of the automatically generated business-friendly metadata for the asset.
+ public let predictionConfiguration: PredictionConfiguration?
+ /// The read-only metadata forms that were attached to the asset as part of the asset revision.
+ public let readOnlyFormsOutput: [FormOutput]?
+ /// The revision of the asset.
+ public let revision: String
+ /// The identifier of the revision type.
+ public let typeIdentifier: String
+ /// The revision type of the asset.
+ public let typeRevision: String
+
+ public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput], glossaryTerms: [String]? = nil, id: String, listing: AssetListingDetails? = nil, name: String, owningProjectId: String, predictionConfiguration: PredictionConfiguration? = nil, readOnlyFormsOutput: [FormOutput]? = nil, revision: String, typeIdentifier: String, typeRevision: String) {
+ self.createdAt = createdAt
+ self.createdBy = createdBy
+ self.description = description
+ self.domainId = domainId
+ self.externalIdentifier = externalIdentifier
+ self.firstRevisionCreatedAt = firstRevisionCreatedAt
+ self.firstRevisionCreatedBy = firstRevisionCreatedBy
+ self.formsOutput = formsOutput
+ self.glossaryTerms = glossaryTerms
+ self.id = id
+ self.listing = listing
+ self.name = name
+ self.owningProjectId = owningProjectId
+ self.predictionConfiguration = predictionConfiguration
+ self.readOnlyFormsOutput = readOnlyFormsOutput
+ self.revision = revision
+ self.typeIdentifier = typeIdentifier
+ self.typeRevision = typeRevision
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case createdAt = "createdAt"
+ case createdBy = "createdBy"
+ case description = "description"
+ case domainId = "domainId"
+ case externalIdentifier = "externalIdentifier"
+ case firstRevisionCreatedAt = "firstRevisionCreatedAt"
+ case firstRevisionCreatedBy = "firstRevisionCreatedBy"
+ case formsOutput = "formsOutput"
+ case glossaryTerms = "glossaryTerms"
+ case id = "id"
+ case listing = "listing"
+ case name = "name"
+ case owningProjectId = "owningProjectId"
+ case predictionConfiguration = "predictionConfiguration"
+ case readOnlyFormsOutput = "readOnlyFormsOutput"
+ case revision = "revision"
+ case typeIdentifier = "typeIdentifier"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct CreateAssetTypeInput: AWSEncodableShape {
+ public static var _encoding = [
+ AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier"))
+ ]
+
+ /// The description of the custom asset type.
+ public let description: String?
+ /// The unique identifier of the Amazon DataZone domain where the custom asset type is being created.
+ public let domainIdentifier: String
+ /// The metadata forms that are to be attached to the custom asset type.
+ public let formsInput: [String: FormEntryInput]
+ /// The name of the custom asset type.
+ public let name: String
+ /// The identifier of the Amazon DataZone project that is to own the custom asset type.
+ public let owningProjectIdentifier: String
+
+ public init(description: String? = nil, domainIdentifier: String, formsInput: [String: FormEntryInput], name: String, owningProjectIdentifier: String) {
+ self.description = description
+ self.domainIdentifier = domainIdentifier
+ self.formsInput = formsInput
+ self.name = name
+ self.owningProjectIdentifier = owningProjectIdentifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.description, name: "description", parent: name, max: 2048)
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.formsInput.forEach {
+ try validate($0.key, name: "formsInput.key", parent: name, max: 128)
+ try validate($0.key, name: "formsInput.key", parent: name, min: 1)
+ try validate($0.key, name: "formsInput.key", parent: name, pattern: "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$")
+ try $0.value.validate(name: "\(name).formsInput[\"\($0.key)\"]")
+ }
+ try self.validate(self.formsInput, name: "formsInput", parent: name, max: 10)
+ try self.validate(self.name, name: "name", parent: name, max: 256)
+ try self.validate(self.name, name: "name", parent: name, min: 1)
+ try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\.]*")
+ try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case description = "description"
+ case formsInput = "formsInput"
+ case name = "name"
+ case owningProjectIdentifier = "owningProjectIdentifier"
+ }
+ }
+
+ public struct CreateAssetTypeOutput: AWSDecodableShape {
+ /// The timestamp of when the asset type was created.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who created this custom asset type.
+ public let createdBy: String?
+ /// The description of the custom asset type.
+ public let description: String?
+ /// The ID of the Amazon DataZone domain in which the asset type was created.
+ public let domainId: String
+ /// The metadata forms that are attached to the asset type.
+ public let formsOutput: [String: FormEntryOutput]
+ /// The name of the asset type.
+ public let name: String
+ /// The ID of the Amazon DataZone domain where the asset type was originally created.
+ public let originDomainId: String?
+ /// The ID of the Amazon DataZone project where the asset type was originally created.
+ public let originProjectId: String?
+ /// The ID of the Amazon DataZone project that currently owns this asset type.
+ public let owningProjectId: String?
+ /// The revision of the custom asset type.
+ public let revision: String
+ /// The timestamp of when the custom asset type was updated.
+ public let updatedAt: Date?
+ /// The Amazon DataZone user that updated the custom asset type.
+ public let updatedBy: String?
+
+ public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String, updatedAt: Date? = nil, updatedBy: String?
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.formsOutput = formsOutput + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateDataSourceInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The metadata forms that are to be attached to the assets that this data source works with. + public let assetFormsInput: [FormInput]? + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// Specifies the configuration of the data source. It can be set to either glueRunConfiguration or redshiftRunConfiguration. + public let configuration: DataSourceConfigurationInput? + /// The description of the data source. + public let description: String? + /// The ID of the Amazon DataZone domain where the data source is created. + public let domainIdentifier: String + /// Specifies whether the data source is enabled. + public let enableSetting: EnableSetting? + /// The unique identifier of the Amazon DataZone environment to which the data source publishes assets. + public let environmentIdentifier: String + /// The name of the data source. + public let name: String + /// The identifier of the Amazon DataZone project in which you want to add this data source. + public let projectIdentifier: String + /// Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog. + public let publishOnImport: Bool? + /// Specifies whether the business name generation is to be enabled for this data source. + public let recommendation: RecommendationConfiguration? + /// The schedule of the data source runs. + public let schedule: ScheduleConfiguration? + /// The type of the data source. + public let type: String + + public init(assetFormsInput: [FormInput]? = nil, clientToken: String? = CreateDataSourceInput.idempotencyToken(), configuration: DataSourceConfigurationInput? = nil, description: String? = nil, domainIdentifier: String, enableSetting: EnableSetting? = nil, environmentIdentifier: String, name: String, projectIdentifier: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? 
= nil, type: String) { + self.assetFormsInput = assetFormsInput + self.clientToken = clientToken + self.configuration = configuration + self.description = description + self.domainIdentifier = domainIdentifier + self.enableSetting = enableSetting + self.environmentIdentifier = environmentIdentifier + self.name = name + self.projectIdentifier = projectIdentifier + self.publishOnImport = publishOnImport + self.recommendation = recommendation + self.schedule = schedule + self.type = type + } + + public func validate(name: String) throws { + try self.assetFormsInput?.forEach { + try $0.validate(name: "\(name).assetFormsInput[]") + } + try self.validate(self.assetFormsInput, name: "assetFormsInput", parent: name, max: 10) + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.schedule?.validate(name: "\(name).schedule") + try self.validate(self.type, name: "type", parent: name, max: 256) + try self.validate(self.type, name: "type", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case assetFormsInput = "assetFormsInput" + case clientToken = "clientToken" + case configuration = "configuration" + case description = "description" + case enableSetting = "enableSetting" + case environmentIdentifier = "environmentIdentifier" + case name = "name" + case projectIdentifier = "projectIdentifier" + case publishOnImport = "publishOnImport" + case recommendation = "recommendation" + case schedule = "schedule" + case type = "type" + } + } + + public struct CreateDataSourceOutput: AWSDecodableShape { + /// The metadata forms attached to the assets that this data source creates. + public let assetFormsOutput: [FormOutput]? + /// Specifies the configuration of the data source. It can be set to either glueRunConfiguration or redshiftRunConfiguration. + public let configuration: DataSourceConfigurationOutput? + /// The timestamp of when the data source was created. + @OptionalCustomCoding + public var createdAt: Date? + /// The description of the data source. + public let description: String? + /// The ID of the Amazon DataZone domain in which the data source is created. + public let domainId: String + /// Specifies whether the data source is enabled. + public let enableSetting: EnableSetting? + /// The unique identifier of the Amazon DataZone environment to which the data source publishes assets. + public let environmentId: String + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let errorMessage: DataSourceErrorMessage? + /// The unique identifier of the data source. + public let id: String + /// The timestamp that specifies when the data source was last run. + @OptionalCustomCoding + public var lastRunAt: Date? + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let lastRunErrorMessage: DataSourceErrorMessage? + /// The status of the last run of this data source. + public let lastRunStatus: DataSourceRunStatus? + /// The name of the data source. + public let name: String + /// The ID of the Amazon DataZone project to which the data source is added. 
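CreateDataSourceInput above needs only the domain, environment, project, name, and type to be constructed; configuration, schedule, and recommendation can stay nil until needed. A sketch with made-up identifiers; the "GLUE" type string is an assumption:

import SotoDataZone

let dataSource = DataZone.CreateDataSourceInput(
    domainIdentifier: "dzd_1234567890abc",
    environmentIdentifier: "env-12345",
    name: "glue-inventory",
    projectIdentifier: "proj1234",
    publishOnImport: false,
    type: "GLUE"  // assumed data source type
)
try dataSource.validate(name: "CreateDataSourceInput")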
+ public let projectId: String + /// Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog. + public let publishOnImport: Bool? + /// Specifies whether the business name generation is to be enabled for this data source. + public let recommendation: RecommendationConfiguration? + /// The schedule of the data source runs. + public let schedule: ScheduleConfiguration? + /// The status of the data source. + public let status: DataSourceStatus? + /// The type of the data source. + public let type: String? + /// The timestamp of when the data source was updated. + @OptionalCustomCoding + public var updatedAt: Date? + + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { + self.assetFormsOutput = assetFormsOutput + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.enableSetting = enableSetting + self.environmentId = environmentId + self.errorMessage = errorMessage + self.id = id + self.lastRunAt = lastRunAt + self.lastRunErrorMessage = lastRunErrorMessage + self.lastRunStatus = lastRunStatus + self.name = name + self.projectId = projectId + self.publishOnImport = publishOnImport + self.recommendation = recommendation + self.schedule = schedule + self.status = status + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case assetFormsOutput = "assetFormsOutput" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case enableSetting = "enableSetting" + case environmentId = "environmentId" + case errorMessage = "errorMessage" + case id = "id" + case lastRunAt = "lastRunAt" + case lastRunErrorMessage = "lastRunErrorMessage" + case lastRunStatus = "lastRunStatus" + case name = "name" + case projectId = "projectId" + case publishOnImport = "publishOnImport" + case recommendation = "recommendation" + case schedule = "schedule" + case status = "status" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct CreateDomainInput: AWSEncodableShape { + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The description of the Amazon DataZone domain. + public let description: String? + /// The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the Amazon Web Services account that houses the Amazon DataZone domain. + public let domainExecutionRole: String + /// The identifier of the Amazon Web Services Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data. + public let kmsKeyIdentifier: String? + /// The name of the Amazon DataZone domain. 
+ public let name: String + /// The single-sign on configuration of the Amazon DataZone domain. + public let singleSignOn: SingleSignOn? + /// The tags specified for the Amazon DataZone domain. + public let tags: [String: String]? + + public init(clientToken: String? = CreateDomainInput.idempotencyToken(), description: String? = nil, domainExecutionRole: String, kmsKeyIdentifier: String? = nil, name: String, singleSignOn: SingleSignOn? = nil, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.description = description + self.domainExecutionRole = domainExecutionRole + self.kmsKeyIdentifier = kmsKeyIdentifier + self.name = name + self.singleSignOn = singleSignOn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.domainExecutionRole, name: "domainExecutionRole", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, max: 1024) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[\\w \\.:/=+@-]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[\\w \\.:/=+@-]*$") + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case domainExecutionRole = "domainExecutionRole" + case kmsKeyIdentifier = "kmsKeyIdentifier" + case name = "name" + case singleSignOn = "singleSignOn" + case tags = "tags" + } + } + + public struct CreateDomainOutput: AWSDecodableShape { + /// The ARN of the Amazon DataZone domain. + public let arn: String? + /// The description of the Amazon DataZone domain. + public let description: String? + /// The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the Amazon Web Services account that houses the Amazon DataZone domain. + public let domainExecutionRole: String? + /// The identifier of the Amazon DataZone domain. + public let id: String + /// The identifier of the Amazon Web Services Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data. + public let kmsKeyIdentifier: String? + /// The name of the Amazon DataZone domain. + public let name: String? + /// The URL of the data portal for this Amazon DataZone domain. + public let portalUrl: String? + /// The single-sign on configuration of the Amazon DataZone domain. + public let singleSignOn: SingleSignOn? + /// The status of the Amazon DataZone domain. + public let status: DomainStatus? + /// The tags specified for the Amazon DataZone domain. + public let tags: [String: String]? + + public init(arn: String? = nil, description: String? = nil, domainExecutionRole: String? = nil, id: String, kmsKeyIdentifier: String? = nil, name: String? = nil, portalUrl: String? = nil, singleSignOn: SingleSignOn? = nil, status: DomainStatus? = nil, tags: [String: String]? 
= nil) { + self.arn = arn + self.description = description + self.domainExecutionRole = domainExecutionRole + self.id = id + self.kmsKeyIdentifier = kmsKeyIdentifier + self.name = name + self.portalUrl = portalUrl + self.singleSignOn = singleSignOn + self.status = status + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case description = "description" + case domainExecutionRole = "domainExecutionRole" + case id = "id" + case kmsKeyIdentifier = "kmsKeyIdentifier" + case name = "name" + case portalUrl = "portalUrl" + case singleSignOn = "singleSignOn" + case status = "status" + case tags = "tags" + } + } + + public struct CreateEnvironmentInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The description of the Amazon DataZone environment. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the environment is created. + public let domainIdentifier: String + /// The identifier of the environment profile that is used to create this Amazon DataZone environment. + public let environmentProfileIdentifier: String + /// The glossary terms that can be used in this Amazon DataZone environment. + public let glossaryTerms: [String]? + /// The name of the Amazon DataZone environment. + public let name: String + /// The identifier of the Amazon DataZone project in which this environment is created. + public let projectIdentifier: String + /// The user parameters of this Amazon DataZone environment. + public let userParameters: [EnvironmentParameter]? + + public init(description: String? = nil, domainIdentifier: String, environmentProfileIdentifier: String, glossaryTerms: [String]? = nil, name: String, projectIdentifier: String, userParameters: [EnvironmentParameter]? = nil) { + self.description = description + self.domainIdentifier = domainIdentifier + self.environmentProfileIdentifier = environmentProfileIdentifier + self.glossaryTerms = glossaryTerms + self.name = name + self.projectIdentifier = projectIdentifier + self.userParameters = userParameters + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case environmentProfileIdentifier = "environmentProfileIdentifier" + case glossaryTerms = "glossaryTerms" + case name = "name" + case projectIdentifier = "projectIdentifier" + case userParameters = "userParameters" + } + } + + public struct CreateEnvironmentOutput: AWSDecodableShape { + /// The Amazon Web Services account in which the Amazon DataZone environment is created. + public let awsAccountId: String? + /// The Amazon Web Services region in which the Amazon DataZone environment is created. + public let awsAccountRegion: String? 
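CreateDomainInput, defined a little earlier, validates the execution-role ARN and the KMS key ARN against fixed patterns before any request is sent. A sketch with placeholder ARNs (the account ID, role name, and key ID are made up):

import SotoDataZone

let domainInput = DataZone.CreateDomainInput(
    description: "Analytics domain",
    domainExecutionRole: "arn:aws:iam::111122223333:role/service-role/AmazonDataZoneDomainExecution",
    kmsKeyIdentifier: "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
    name: "analytics"
)
try domainInput.validate(name: "CreateDomainInput")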
+ /// The timestamp of when the environment was created. + public let createdAt: Date? + /// The Amazon DataZone user who created this environment. + public let createdBy: String + /// The deployment properties of this Amazon DataZone environment. + public let deploymentProperties: DeploymentProperties? + /// The description of this Amazon DataZone environment. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the environment is created. + public let domainId: String + /// The configurable actions of this Amazon DataZone environment. + public let environmentActions: [ConfigurableEnvironmentAction]? + /// The ID of the blueprint with which this Amazon DataZone environment was created. + public let environmentBlueprintId: String? + /// The ID of the environment profile with which this Amazon DataZone environment was created. + public let environmentProfileId: String + /// The glossary terms that can be used in this Amazon DataZone environment. + public let glossaryTerms: [String]? + /// The ID of this Amazon DataZone environment. + public let id: String? + /// The details of the last deployment of this Amazon DataZone environment. + public let lastDeployment: Deployment? + /// The name of this environment. + public let name: String + /// The ID of the Amazon DataZone project in which this environment is created. + public let projectId: String + /// The provider of this Amazon DataZone environment. + public let provider: String + /// The provisioned resources of this Amazon DataZone environment. + public let provisionedResources: [Resource]? + /// The provisioning properties of this Amazon DataZone environment. + public let provisioningProperties: ProvisioningProperties? + /// The status of this Amazon DataZone environment. + public let status: EnvironmentStatus? + /// The timestamp of when this environment was updated. + public let updatedAt: Date? + /// The user parameters of this Amazon DataZone environment. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.deploymentProperties = deploymentProperties + self.description = description + self.domainId = domainId + self.environmentActions = environmentActions + self.environmentBlueprintId = environmentBlueprintId + self.environmentProfileId = environmentProfileId + self.glossaryTerms = glossaryTerms + self.id = id + self.lastDeployment = lastDeployment + self.name = name + self.projectId = projectId + self.provider = provider + self.provisionedResources = provisionedResources + self.provisioningProperties = provisioningProperties + self.status = status + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case deploymentProperties = "deploymentProperties" + case description = "description" + case domainId = "domainId" + case environmentActions = "environmentActions" + case environmentBlueprintId = "environmentBlueprintId" + case environmentProfileId = "environmentProfileId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case lastDeployment = "lastDeployment" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case provisionedResources = "provisionedResources" + case provisioningProperties = "provisioningProperties" + case status = "status" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct CreateEnvironmentProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The Amazon Web Services account in which the Amazon DataZone environment is created. + public let awsAccountId: String? + /// The Amazon Web Services region in which this environment profile is created. + public let awsAccountRegion: String? + /// The description of this Amazon DataZone environment profile. + public let description: String? + /// The ID of the Amazon DataZone domain in which this environment profile is created. + public let domainIdentifier: String + /// The ID of the blueprint with which this environment profile is created. + public let environmentBlueprintIdentifier: String + /// The name of this Amazon DataZone environment profile. + public let name: String + /// The identifier of the project in which to create the environment profile. + public let projectIdentifier: String + /// The user parameters of this Amazon DataZone environment profile. + public let userParameters: [EnvironmentParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, description: String? = nil, domainIdentifier: String, environmentBlueprintIdentifier: String, name: String, projectIdentifier: String, userParameters: [EnvironmentParameter]? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.description = description + self.domainIdentifier = domainIdentifier + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier + self.name = name + self.projectIdentifier = projectIdentifier + self.userParameters = userParameters + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case description = "description" + case environmentBlueprintIdentifier = "environmentBlueprintIdentifier" + case name = "name" + case projectIdentifier = "projectIdentifier" + case userParameters = "userParameters" + } + } + + public struct CreateEnvironmentProfileOutput: AWSDecodableShape { + /// The Amazon Web Services account ID in which this Amazon DataZone environment profile is created. + public let awsAccountId: String? + /// The Amazon Web Services region in which this Amazon DataZone environment profile is created. + public let awsAccountRegion: String? + /// The timestamp of when this environment profile was created. + public let createdAt: Date? + /// The Amazon DataZone user who created this environment profile. + public let createdBy: String + /// The description of this Amazon DataZone environment profile. + public let description: String? + /// The ID of the Amazon DataZone domain in which this environment profile is created. + public let domainId: String + /// The ID of the blueprint with which this environment profile is created. + public let environmentBlueprintId: String + /// The ID of this Amazon DataZone environment profile. + public let id: String + /// The name of this Amazon DataZone environment profile. + public let name: String + /// The ID of the Amazon DataZone project in which this environment profile is created. + public let projectId: String? + /// The timestamp of when this environment profile was updated. + public let updatedAt: Date? + /// The user parameters of this Amazon DataZone environment profile. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentBlueprintId: String, id: String, name: String, projectId: String? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.environmentBlueprintId = environmentBlueprintId + self.id = id + self.name = name + self.projectId = projectId + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case environmentBlueprintId = "environmentBlueprintId" + case id = "id" + case name = "name" + case projectId = "projectId" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct CreateFormTypeInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The description of this Amazon DataZone metadata form type. + public let description: String? + /// The ID of the Amazon DataZone domain in which this metadata form type is created. + public let domainIdentifier: String + /// The model of this Amazon DataZone metadata form type. + public let model: Model + /// The name of this Amazon DataZone metadata form type. + public let name: String + /// The ID of the Amazon DataZone project that owns this metadata form type. + public let owningProjectIdentifier: String + /// The status of this Amazon DataZone metadata form type. + public let status: FormTypeStatus? + + public init(description: String? = nil, domainIdentifier: String, model: Model, name: String, owningProjectIdentifier: String, status: FormTypeStatus? = nil) { + self.description = description + self.domainIdentifier = domainIdentifier + self.model = model + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.model.validate(name: "\(name).model") + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^(amazon.datazone.)?(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$") + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case model = "model" + case name = "name" + case owningProjectIdentifier = "owningProjectIdentifier" + case status = "status" + } + } + + public struct CreateFormTypeOutput: AWSDecodableShape { + /// The description of this Amazon DataZone metadata form type. + public let description: String? + /// The ID of the Amazon DataZone domain in which this metadata form type is created. + public let domainId: String + /// The name of this Amazon DataZone metadata form type. + public let name: String + /// The ID of the Amazon DataZone domain in which this metadata form type was originally created. + public let originDomainId: String? + /// The ID of the project in which this Amazon DataZone metadata form type was originally created. 
+ public let originProjectId: String? + /// The ID of the project that owns this Amazon DataZone metadata form type. + public let owningProjectId: String? + /// The revision of this Amazon DataZone metadata form type. + public let revision: String + + public init(description: String? = nil, domainId: String, name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String) { + self.description = description + self.domainId = domainId + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + } + } + + public struct CreateGlossaryInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The description of this business glossary. + public let description: String? + /// The ID of the Amazon DataZone domain in which this business glossary is created. + public let domainIdentifier: String + /// The name of this business glossary. + public let name: String + /// The ID of the project that currently owns this business glossary. + public let owningProjectIdentifier: String + /// The status of this business glossary. + public let status: GlossaryStatus? + + public init(clientToken: String? = CreateGlossaryInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, name: String, owningProjectIdentifier: String, status: GlossaryStatus? = nil) { + self.clientToken = clientToken + self.description = description + self.domainIdentifier = domainIdentifier + self.name = name + self.owningProjectIdentifier = owningProjectIdentifier + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 4096) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case name = "name" + case owningProjectIdentifier = "owningProjectIdentifier" + case status = "status" + } + } + + public struct CreateGlossaryOutput: AWSDecodableShape { + /// The description of this business glossary. + public let description: String? + /// The ID of the Amazon DataZone domain in which this business glossary is created. + public let domainId: String + /// The ID of this business glossary. 
+ public let id: String + /// The name of this business glossary. + public let name: String + /// The ID of the project that currently owns this business glossary. + public let owningProjectId: String + /// The status of this business glossary. + public let status: GlossaryStatus? + + public init(description: String? = nil, domainId: String, id: String, name: String, owningProjectId: String, status: GlossaryStatus? = nil) { + self.description = description + self.domainId = domainId + self.id = id + self.name = name + self.owningProjectId = owningProjectId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case id = "id" + case name = "name" + case owningProjectId = "owningProjectId" + case status = "status" + } + } + + public struct CreateGlossaryTermInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The ID of the Amazon DataZone domain in which this business glossary term is created. + public let domainIdentifier: String + /// The ID of the business glossary in which this term is created. + public let glossaryIdentifier: String + /// The long description of this business glossary term. + public let longDescription: String? + /// The name of this business glossary term. + public let name: String + /// The short description of this business glossary term. + public let shortDescription: String? + /// The status of this business glossary term. + public let status: GlossaryTermStatus? + /// The term relations of this business glossary term. + public let termRelations: TermRelations? + + public init(clientToken: String? = CreateGlossaryTermInput.idempotencyToken(), domainIdentifier: String, glossaryIdentifier: String, longDescription: String? = nil, name: String, shortDescription: String? = nil, status: GlossaryTermStatus? = nil, termRelations: TermRelations? 
= nil) { + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.glossaryIdentifier = glossaryIdentifier + self.longDescription = longDescription + self.name = name + self.shortDescription = shortDescription + self.status = status + self.termRelations = termRelations + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.glossaryIdentifier, name: "glossaryIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.longDescription, name: "longDescription", parent: name, max: 4096) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.shortDescription, name: "shortDescription", parent: name, max: 1024) + try self.termRelations?.validate(name: "\(name).termRelations") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case glossaryIdentifier = "glossaryIdentifier" + case longDescription = "longDescription" + case name = "name" + case shortDescription = "shortDescription" + case status = "status" + case termRelations = "termRelations" + } + } + + public struct CreateGlossaryTermOutput: AWSDecodableShape { + /// The ID of the Amazon DataZone domain in which this business glossary term is created. + public let domainId: String + /// The ID of the business glossary in which this term is created. + public let glossaryId: String + /// The ID of this business glossary term. + public let id: String + /// The long description of this business glossary term. + public let longDescription: String? + /// The name of this business glossary term. + public let name: String + /// The short description of this business glossary term. + public let shortDescription: String? + /// The status of this business glossary term. + public let status: GlossaryTermStatus + /// The term relations of this business glossary term. + public let termRelations: TermRelations? + + public init(domainId: String, glossaryId: String, id: String, longDescription: String? = nil, name: String, shortDescription: String? = nil, status: GlossaryTermStatus, termRelations: TermRelations? = nil) { + self.domainId = domainId + self.glossaryId = glossaryId + self.id = id + self.longDescription = longDescription + self.name = name + self.shortDescription = shortDescription + self.status = status + self.termRelations = termRelations + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case glossaryId = "glossaryId" + case id = "id" + case longDescription = "longDescription" + case name = "name" + case shortDescription = "shortDescription" + case status = "status" + case termRelations = "termRelations" + } + } + + public struct CreateGroupProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The identifier of the Amazon DataZone domain in which the group profile is created. 
+ public let domainIdentifier: String + /// The identifier of the group for which the group profile is created. + public let groupIdentifier: String + + public init(clientToken: String? = CreateGroupProfileInput.idempotencyToken(), domainIdentifier: String, groupIdentifier: String) { + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.groupIdentifier = groupIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.groupIdentifier, name: "groupIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]+)") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case groupIdentifier = "groupIdentifier" + } + } + + public struct CreateGroupProfileOutput: AWSDecodableShape { + /// The identifier of the Amazon DataZone domain in which the group profile is created. + public let domainId: String? + /// The name of the group for which the group profile is created. + public let groupName: String? + /// The identifier of the group profile. + public let id: String? + /// The status of the group profile. + public let status: GroupProfileStatus? + + public init(domainId: String? = nil, groupName: String? = nil, id: String? = nil, status: GroupProfileStatus? = nil) { + self.domainId = domainId + self.groupName = groupName + self.id = id + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case groupName = "groupName" + case id = "id" + case status = "status" + } + } + + public struct CreateListingChangeSetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + public let action: ChangeAction + public let clientToken: String? + public let domainIdentifier: String + public let entityIdentifier: String + public let entityRevision: String? + public let entityType: EntityType + + public init(action: ChangeAction, clientToken: String? = CreateListingChangeSetInput.idempotencyToken(), domainIdentifier: String, entityIdentifier: String, entityRevision: String? 
= nil, entityType: EntityType) { + self.action = action + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.entityIdentifier = entityIdentifier + self.entityRevision = entityRevision + self.entityType = entityType + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.entityRevision, name: "entityRevision", parent: name, max: 64) + try self.validate(self.entityRevision, name: "entityRevision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case clientToken = "clientToken" + case entityIdentifier = "entityIdentifier" + case entityRevision = "entityRevision" + case entityType = "entityType" + } + } + + public struct CreateListingChangeSetOutput: AWSDecodableShape { + public let listingId: String + public let listingRevision: String + public let status: ListingStatus + + public init(listingId: String, listingRevision: String, status: ListingStatus) { + self.listingId = listingId + self.listingRevision = listingRevision + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case listingId = "listingId" + case listingRevision = "listingRevision" + case status = "status" + } + } + + public struct CreateProjectInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The description of the Amazon DataZone project. + public let description: String? + /// The ID of the Amazon DataZone domain in which this project is created. + public let domainIdentifier: String + /// The glossary terms that can be used in this Amazon DataZone project. + public let glossaryTerms: [String]? + /// The name of the Amazon DataZone project. + public let name: String + + public init(description: String? = nil, domainIdentifier: String, glossaryTerms: [String]? 
= nil, name: String) { + self.description = description + self.domainIdentifier = domainIdentifier + self.glossaryTerms = glossaryTerms + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case glossaryTerms = "glossaryTerms" + case name = "name" + } + } + + public struct CreateProjectMembershipInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "projectIdentifier", location: .uri("projectIdentifier")) + ] + + /// The designation of the project membership. + public let designation: UserDesignation + /// The ID of the Amazon DataZone domain in which project membership is created. + public let domainIdentifier: String + /// The project member whose project membership was created. + public let member: Member + /// The ID of the project for which this project membership was created. + public let projectIdentifier: String + + public init(designation: UserDesignation, domainIdentifier: String, member: Member, projectIdentifier: String) { + self.designation = designation + self.domainIdentifier = domainIdentifier + self.member = member + self.projectIdentifier = projectIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case designation = "designation" + case member = "member" + } + } + + public struct CreateProjectMembershipOutput: AWSDecodableShape { + public init() {} + } + + public struct CreateProjectOutput: AWSDecodableShape { + /// The timestamp of when the project was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the project. + public let createdBy: String + /// The description of the project. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the project was created. + public let domainId: String + /// The glossary terms that can be used in the project. + public let glossaryTerms: [String]? + /// The ID of the Amazon DataZone project. + public let id: String + /// The timestamp of when the project was last updated. + public let lastUpdatedAt: Date? + /// The name of the project. + public let name: String + + public init(createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, glossaryTerms: [String]? = nil, id: String, lastUpdatedAt: Date? 
= nil, name: String) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.glossaryTerms = glossaryTerms + self.id = id + self.lastUpdatedAt = lastUpdatedAt + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case lastUpdatedAt = "lastUpdatedAt" + case name = "name" + } + } + + public struct CreateSubscriptionGrantInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The names of the assets for which the subscription grant is created. + public let assetTargetNames: [AssetTargetNameMap]? + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The ID of the Amazon DataZone domain in which the subscription grant is created. + public let domainIdentifier: String + /// The ID of the environment in which the subscription grant is created. + public let environmentIdentifier: String + /// The entity to which the subscription is to be granted. + public let grantedEntity: GrantedEntityInput + /// The ID of the subscription target for which the subscription grant is created. + public let subscriptionTargetIdentifier: String + + public init(assetTargetNames: [AssetTargetNameMap]? = nil, clientToken: String? = CreateSubscriptionGrantInput.idempotencyToken(), domainIdentifier: String, environmentIdentifier: String, grantedEntity: GrantedEntityInput, subscriptionTargetIdentifier: String) { + self.assetTargetNames = assetTargetNames + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.grantedEntity = grantedEntity + self.subscriptionTargetIdentifier = subscriptionTargetIdentifier + } + + public func validate(name: String) throws { + try self.assetTargetNames?.forEach { + try $0.validate(name: "\(name).assetTargetNames[]") + } + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.grantedEntity.validate(name: "\(name).grantedEntity") + try self.validate(self.subscriptionTargetIdentifier, name: "subscriptionTargetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case assetTargetNames = "assetTargetNames" + case clientToken = "clientToken" + case environmentIdentifier = "environmentIdentifier" + case grantedEntity = "grantedEntity" + case subscriptionTargetIdentifier = "subscriptionTargetIdentifier" + } + } + + public struct CreateSubscriptionGrantOutput: AWSDecodableShape { + /// The assets for which the subscription grant is created. + public let assets: [SubscribedAsset]? + /// A timestamp of when the subscription grant is created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription grant. + public let createdBy: String + /// The ID of the Amazon DataZone domain in which the subscription grant is created. + public let domainId: String + /// The entity to which the subscription is granted. 
+ public let grantedEntity: GrantedEntity + /// The ID of the subscription grant. + public let id: String + /// The status of the subscription grant. + public let status: SubscriptionGrantOverallStatus + /// The identifier of the subscription grant. + public let subscriptionId: String? + /// The ID of the subscription target for which the subscription grant is created. + public let subscriptionTargetId: String + /// A timestamp of when the subscription grant was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription grant. + public let updatedBy: String? + + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case assets = "assets" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case grantedEntity = "grantedEntity" + case id = "id" + case status = "status" + case subscriptionId = "subscriptionId" + case subscriptionTargetId = "subscriptionTargetId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateSubscriptionRequestInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The ID of the Amazon DataZone domain in which the subscription request is created. + public let domainIdentifier: String + /// The reason for the subscription request. + public let requestReason: String + public let subscribedListings: [SubscribedListingInput] + /// The Amazon DataZone principals for whom the subscription request is created. + public let subscribedPrincipals: [SubscribedPrincipalInput] + + public init(clientToken: String? 
= CreateSubscriptionRequestInput.idempotencyToken(), domainIdentifier: String, requestReason: String, subscribedListings: [SubscribedListingInput], subscribedPrincipals: [SubscribedPrincipalInput]) { + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.requestReason = requestReason + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.requestReason, name: "requestReason", parent: name, max: 4096) + try self.validate(self.requestReason, name: "requestReason", parent: name, min: 1) + try self.subscribedListings.forEach { + try $0.validate(name: "\(name).subscribedListings[]") + } + try self.validate(self.subscribedListings, name: "subscribedListings", parent: name, max: 1) + try self.validate(self.subscribedListings, name: "subscribedListings", parent: name, min: 1) + try self.subscribedPrincipals.forEach { + try $0.validate(name: "\(name).subscribedPrincipals[]") + } + try self.validate(self.subscribedPrincipals, name: "subscribedPrincipals", parent: name, max: 1) + try self.validate(self.subscribedPrincipals, name: "subscribedPrincipals", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case requestReason = "requestReason" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + } + } + + public struct CreateSubscriptionRequestOutput: AWSDecodableShape { + /// A timestamp of when the subscription request is created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription request. + public let createdBy: String + /// The decision comment of the subscription request. + public let decisionComment: String? + /// The ID of the Amazon DataZone domain in which the subscription request is created. + public let domainId: String + /// The ID of the subscription request. + public let id: String + /// The reason for the subscription request. + public let requestReason: String + /// The ID of the reviewer of the subscription request. + public let reviewerId: String? + /// The status of the subscription request. + public let status: SubscriptionRequestStatus + public let subscribedListings: [SubscribedListing] + /// The subscribed principals of the subscription request. + public let subscribedPrincipals: [SubscribedPrincipal] + /// The timestamp of when the subscription request was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription request. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, decisionComment: String? = nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String? 
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.decisionComment = decisionComment + self.domainId = domainId + self.id = id + self.requestReason = requestReason + self.reviewerId = reviewerId + self.status = status + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case decisionComment = "decisionComment" + case domainId = "domainId" + case id = "id" + case requestReason = "requestReason" + case reviewerId = "reviewerId" + case status = "status" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateSubscriptionTargetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .uri("environmentIdentifier")) + ] + + /// The asset types that can be included in the subscription target. + public let applicableAssetTypes: [String] + /// The authorized principals of the subscription target. + public let authorizedPrincipals: [String] + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The ID of the Amazon DataZone domain in which subscription target is created. + public let domainIdentifier: String + /// The ID of the environment in which subscription target is created. + public let environmentIdentifier: String + /// The manage access role that is used to create the subscription target. + public let manageAccessRole: String + /// The name of the subscription target. + public let name: String + /// The provider of the subscription target. + public let provider: String? + /// The configuration of the subscription target. + public let subscriptionTargetConfig: [SubscriptionTargetForm] + /// The type of the subscription target. + public let type: String + + public init(applicableAssetTypes: [String], authorizedPrincipals: [String], clientToken: String? = CreateSubscriptionTargetInput.idempotencyToken(), domainIdentifier: String, environmentIdentifier: String, manageAccessRole: String, name: String, provider: String? 
= nil, subscriptionTargetConfig: [SubscriptionTargetForm], type: String) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.manageAccessRole = manageAccessRole + self.name = name + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + self.type = type + } + + public func validate(name: String) throws { + try self.applicableAssetTypes.forEach { + try validate($0, name: "applicableAssetTypes[]", parent: name, max: 256) + try validate($0, name: "applicableAssetTypes[]", parent: name, min: 1) + try validate($0, name: "applicableAssetTypes[]", parent: name, pattern: "^[^\\.]*") + } + try self.authorizedPrincipals.forEach { + try validate($0, name: "authorizedPrincipals[]", parent: name, pattern: "^[a-zA-Z0-9:/_-]*$") + } + try self.validate(self.authorizedPrincipals, name: "authorizedPrincipals", parent: name, max: 10) + try self.validate(self.authorizedPrincipals, name: "authorizedPrincipals", parent: name, min: 1) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.subscriptionTargetConfig.forEach { + try $0.validate(name: "\(name).subscriptionTargetConfig[]") + } + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case clientToken = "clientToken" + case manageAccessRole = "manageAccessRole" + case name = "name" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + case type = "type" + } + } + + public struct CreateSubscriptionTargetOutput: AWSDecodableShape { + /// The asset types that can be included in the subscription target. + public let applicableAssetTypes: [String] + /// The authorized principals of the subscription target. + public let authorizedPrincipals: [String] + /// The timestamp of when the subscription target was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription target. + public let createdBy: String + /// The ID of the Amazon DataZone domain in which the subscription target was created. + public let domainId: String + /// The ID of the environment in which the subscription target was created. + public let environmentId: String + /// The ID of the subscription target. + public let id: String + /// The manage access role with which the subscription target was created. + public let manageAccessRole: String + /// The name of the subscription target. + public let name: String + /// The ID of the project in which the subscription target was created. + public let projectId: String + /// The provider of the subscription target. + public let provider: String + /// The configuration of the subscription target. + public let subscriptionTargetConfig: [SubscriptionTargetForm] + /// The type of the subscription target. + public let type: String + /// The timestamp of when the subscription target was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the subscription target. + public let updatedBy: String? 
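+ // Another illustrative aside, not generated code: a hypothetical CreateSubscriptionTargetInput (defined above),
+ // again assuming the usual `extension DataZone` namespace. clientToken falls back to the auto-generated
+ // idempotency token, the placeholder values are chosen only to pass the checks in validate(name:), and the form
+ // list is left empty because SubscriptionTargetForm is defined elsewhere in these shapes.
+ //
+ //     let target = DataZone.CreateSubscriptionTargetInput(
+ //         applicableAssetTypes: ["GlueTableAssetType"],                         // hypothetical asset type name
+ //         authorizedPrincipals: ["arn:aws:iam::123456789012:role/ExampleRole"], // 1-10 entries matching ^[a-zA-Z0-9:/_-]*$
+ //         domainIdentifier: "dzd-1a2b3c4d5e6f",
+ //         environmentIdentifier: "9z8y7x6w5v4u",
+ //         manageAccessRole: "arn:aws:iam::123456789012:role/ExampleManageAccessRole",
+ //         name: "example-subscription-target",
+ //         subscriptionTargetConfig: [],
+ //         type: "GlueSubscriptionTargetType"                                    // hypothetical target type
+ //     )
+ //     try target.validate(name: "CreateSubscriptionTargetInput")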
+ + public init(applicableAssetTypes: [String], authorizedPrincipals: [String], createdAt: Date, createdBy: String, domainId: String, environmentId: String, id: String, manageAccessRole: String, name: String, projectId: String, provider: String, subscriptionTargetConfig: [SubscriptionTargetForm], type: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.manageAccessRole = manageAccessRole + self.name = name + self.projectId = projectId + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + self.type = type + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case manageAccessRole = "manageAccessRole" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + case type = "type" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct CreateUserProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The identifier of the Amazon DataZone domain in which a user profile is created. + public let domainIdentifier: String + /// The identifier of the user for which the user profile is created. + public let userIdentifier: String + /// The user type of the user for which the user profile is created. + public let userType: UserType? + + public init(clientToken: String? = CreateUserProfileInput.idempotencyToken(), domainIdentifier: String, userIdentifier: String, userType: UserType? = nil) { + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.userIdentifier = userIdentifier + self.userType = userType + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.userIdentifier, name: "userIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[a-zA-Z_0-9+=,.@-]+$|^arn:aws:iam::\\d{12}:.+$)") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case userIdentifier = "userIdentifier" + case userType = "userType" + } + } + + public struct CreateUserProfileOutput: AWSDecodableShape { + public let details: UserProfileDetails? + /// The identifier of the Amazon DataZone domain in which a user profile is created. + public let domainId: String? + /// The identifier of the user profile. + public let id: String? + /// The status of the user profile. + public let status: UserProfileStatus? + /// The type of the user profile. + public let type: UserProfileType? + + public init(details: UserProfileDetails? = nil, domainId: String? = nil, id: String? = nil, status: UserProfileStatus? = nil, type: UserProfileType? 
= nil) { + self.details = details + self.domainId = domainId + self.id = id + self.status = status + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case details = "details" + case domainId = "domainId" + case id = "id" + case status = "status" + case type = "type" + } + } + + public struct CustomParameter: AWSDecodableShape { + /// The default value of the parameter. + public let defaultValue: String? + /// The description of the parameter. + public let description: String? + /// The field type of the parameter. + public let fieldType: String + /// Specifies whether the parameter is editable. + public let isEditable: Bool? + /// Specifies whether the custom parameter is optional. + public let isOptional: Bool? + /// The key name of the parameter. + public let keyName: String + + public init(defaultValue: String? = nil, description: String? = nil, fieldType: String, isEditable: Bool? = nil, isOptional: Bool? = nil, keyName: String) { + self.defaultValue = defaultValue + self.description = description + self.fieldType = fieldType + self.isEditable = isEditable + self.isOptional = isOptional + self.keyName = keyName + } + + private enum CodingKeys: String, CodingKey { + case defaultValue = "defaultValue" + case description = "description" + case fieldType = "fieldType" + case isEditable = "isEditable" + case isOptional = "isOptional" + case keyName = "keyName" + } + } + + public struct DataProductItem: AWSDecodableShape { + public let domainId: String? + public let itemId: String? + + public init(domainId: String? = nil, itemId: String? = nil) { + self.domainId = domainId + self.itemId = itemId + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case itemId = "itemId" + } + } + + public struct DataProductSummary: AWSDecodableShape { + public let createdAt: Date? + public let createdBy: String? + public let dataProductItems: [DataProductItem]? + public let description: String? + public let domainId: String + public let glossaryTerms: [String]? + public let id: String + public let name: String + public let owningProjectId: String + public let updatedAt: Date? + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, dataProductItems: [DataProductItem]? = nil, description: String? = nil, domainId: String, glossaryTerms: [String]? = nil, id: String, name: String, owningProjectId: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.dataProductItems = dataProductItems + self.description = description + self.domainId = domainId + self.glossaryTerms = glossaryTerms + self.id = id + self.name = name + self.owningProjectId = owningProjectId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case dataProductItems = "dataProductItems" + case description = "description" + case domainId = "domainId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case name = "name" + case owningProjectId = "owningProjectId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct DataSourceErrorMessage: AWSDecodableShape { + /// The details of the error message that is returned if the operation cannot be successfully completed. + public let errorDetail: String? + /// The type of the error message that is returned if the operation cannot be successfully completed. 
+ public let errorType: DataSourceErrorType + + public init(errorDetail: String? = nil, errorType: DataSourceErrorType) { + self.errorDetail = errorDetail + self.errorType = errorType + } + + private enum CodingKeys: String, CodingKey { + case errorDetail = "errorDetail" + case errorType = "errorType" + } + } + + public struct DataSourceRunActivity: AWSDecodableShape { + /// The timestamp of when data source run activity was created. + @CustomCoding<ISO8601DateCoder> + public var createdAt: Date + /// The identifier of the asset included in the data source run activity. + public let dataAssetId: String? + /// The status of the asset included in the data source run activity. + public let dataAssetStatus: DataAssetActivityStatus + /// The database included in the data source run activity. + public let database: String + /// The identifier of the data source run for the data source run activity. + public let dataSourceRunId: String + public let errorMessage: DataSourceErrorMessage? + /// The project ID included in the data source run activity. + public let projectId: String + /// The technical description included in the data source run activity. + public let technicalDescription: String? + /// The technical name included in the data source run activity. + public let technicalName: String + /// The timestamp of when data source run activity was updated. + @CustomCoding<ISO8601DateCoder> + public var updatedAt: Date + + public init(createdAt: Date, dataAssetId: String? = nil, dataAssetStatus: DataAssetActivityStatus, database: String, dataSourceRunId: String, errorMessage: DataSourceErrorMessage? = nil, projectId: String, technicalDescription: String? = nil, technicalName: String, updatedAt: Date) { + self.createdAt = createdAt + self.dataAssetId = dataAssetId + self.dataAssetStatus = dataAssetStatus + self.database = database + self.dataSourceRunId = dataSourceRunId + self.errorMessage = errorMessage + self.projectId = projectId + self.technicalDescription = technicalDescription + self.technicalName = technicalName + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataAssetId = "dataAssetId" + case dataAssetStatus = "dataAssetStatus" + case database = "database" + case dataSourceRunId = "dataSourceRunId" + case errorMessage = "errorMessage" + case projectId = "projectId" + case technicalDescription = "technicalDescription" + case technicalName = "technicalName" + case updatedAt = "updatedAt" + } + } + + public struct DataSourceRunSummary: AWSDecodableShape { + /// The timestamp of when a data source run was created. + @CustomCoding<ISO8601DateCoder> + public var createdAt: Date + /// The identifier of the data source of the data source run. + public let dataSourceId: String + public let errorMessage: DataSourceErrorMessage? + /// The identifier of the data source run. + public let id: String + /// The project ID of the data source run. + public let projectId: String + public let runStatisticsForAssets: RunStatisticsForAssets? + /// The timestamp of when a data source run was started. + @OptionalCustomCoding<ISO8601DateCoder> + public var startedAt: Date? + /// The status of the data source run. + public let status: DataSourceRunStatus + /// The timestamp of when a data source run was stopped. + @OptionalCustomCoding<ISO8601DateCoder> + public var stoppedAt: Date? + /// The type of the data source run. + public let type: DataSourceRunType + /// The timestamp of when a data source run was updated. 
+ @CustomCoding<ISO8601DateCoder> + public var updatedAt: Date + + public init(createdAt: Date, dataSourceId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, projectId: String, runStatisticsForAssets: RunStatisticsForAssets? = nil, startedAt: Date? = nil, status: DataSourceRunStatus, stoppedAt: Date? = nil, type: DataSourceRunType, updatedAt: Date) { + self.createdAt = createdAt + self.dataSourceId = dataSourceId + self.errorMessage = errorMessage + self.id = id + self.projectId = projectId + self.runStatisticsForAssets = runStatisticsForAssets + self.startedAt = startedAt + self.status = status + self.stoppedAt = stoppedAt + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataSourceId = "dataSourceId" + case errorMessage = "errorMessage" + case id = "id" + case projectId = "projectId" + case runStatisticsForAssets = "runStatisticsForAssets" + case startedAt = "startedAt" + case status = "status" + case stoppedAt = "stoppedAt" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct DataSourceSummary: AWSDecodableShape { + /// The timestamp of when the data source was created. + @OptionalCustomCoding<ISO8601DateCoder> + public var createdAt: Date? + /// The ID of the data source. + public let dataSourceId: String + /// The ID of the Amazon DataZone domain in which the data source exists. + public let domainId: String + /// Specifies whether the data source is enabled. + public let enableSetting: EnableSetting? + /// The ID of the environment in which the data source exists. + public let environmentId: String + /// The count of the assets created during the last data source run. + public let lastRunAssetCount: Int? + /// The timestamp of when the data source run was last performed. + @OptionalCustomCoding<ISO8601DateCoder> + public var lastRunAt: Date? + public let lastRunErrorMessage: DataSourceErrorMessage? + /// The status of the last data source run. + public let lastRunStatus: DataSourceRunStatus? + /// The name of the data source. + public let name: String + public let schedule: ScheduleConfiguration? + /// The status of the data source. + public let status: DataSourceStatus + /// The type of the data source. + public let type: String + /// The timestamp of when the data source was updated. + @OptionalCustomCoding<ISO8601DateCoder> + public var updatedAt: Date? + + public init(createdAt: Date? = nil, dataSourceId: String, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, lastRunAssetCount: Int? = nil, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus, type: String, updatedAt: Date? 
= nil) { + self.createdAt = createdAt + self.dataSourceId = dataSourceId + self.domainId = domainId + self.enableSetting = enableSetting + self.environmentId = environmentId + self.lastRunAssetCount = lastRunAssetCount + self.lastRunAt = lastRunAt + self.lastRunErrorMessage = lastRunErrorMessage + self.lastRunStatus = lastRunStatus + self.name = name + self.schedule = schedule + self.status = status + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataSourceId = "dataSourceId" + case domainId = "domainId" + case enableSetting = "enableSetting" + case environmentId = "environmentId" + case lastRunAssetCount = "lastRunAssetCount" + case lastRunAt = "lastRunAt" + case lastRunErrorMessage = "lastRunErrorMessage" + case lastRunStatus = "lastRunStatus" + case name = "name" + case schedule = "schedule" + case status = "status" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct DeleteAssetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the asset is deleted. + public let domainIdentifier: String + /// The identifier of the asset that is deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAssetOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteAssetTypeInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the asset type is deleted. + public let domainIdentifier: String + /// The identifier of the asset type that is deleted. 
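+ // Illustrative usage sketch, not part of the generated model. Assuming the generated DataZone
+ // client follows the usual Soto pattern (a service object wrapping an AWSClient, one method per
+ // operation), deleting an asset might look like the following; the method name `deleteAsset`,
+ // the region and the identifiers are assumptions for illustration only:
+ //
+ //     import SotoDataZone
+ //
+ //     let awsClient = AWSClient(httpClientProvider: .createNew)
+ //     defer { try? awsClient.syncShutdown() }
+ //     let dataZone = DataZone(client: awsClient, region: .useast1)
+ //
+ //     let input = DataZone.DeleteAssetInput(
+ //         domainIdentifier: "dzd_1a2b3c4d5e",   // must match ^dzd[-_][a-zA-Z0-9_-]{1,36}$
+ //         identifier: "abc123"                  // must match ^[a-zA-Z0-9_-]{1,36}$
+ //     )
+ //     _ = try await dataZone.deleteAsset(input) // DeleteAssetOutput carries no fields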
+ public let identifier: String
+
+ public init(domainIdentifier: String, identifier: String) {
+ self.domainIdentifier = domainIdentifier
+ self.identifier = identifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.identifier, name: "identifier", parent: name, max: 385)
+ try self.validate(self.identifier, name: "identifier", parent: name, min: 1)
+ try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$")
+ }
+
+ private enum CodingKeys: CodingKey {}
+ }
+
+ public struct DeleteAssetTypeOutput: AWSDecodableShape {
+ public init() {}
+ }
+
+ public struct DeleteDataSourceInput: AWSEncodableShape {
+ public static var _encoding = [
+ AWSMemberEncoding(label: "clientToken", location: .querystring("clientToken")),
+ AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+ AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+ ]
+
+ /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request.
+ public let clientToken: String?
+ /// The ID of the Amazon DataZone domain in which the data source is deleted.
+ public let domainIdentifier: String
+ /// The identifier of the data source that is deleted.
+ public let identifier: String
+
+ public init(clientToken: String? = DeleteDataSourceInput.idempotencyToken(), domainIdentifier: String, identifier: String) {
+ self.clientToken = clientToken
+ self.domainIdentifier = domainIdentifier
+ self.identifier = identifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: CodingKey {}
+ }
+
+ public struct DeleteDataSourceOutput: AWSDecodableShape {
+ /// The asset data forms associated with this data source.
+ public let assetFormsOutput: [FormOutput]?
+ /// The configuration of the data source that is deleted.
+ public let configuration: DataSourceConfigurationOutput?
+ /// The timestamp of when this data source was created.
+ @OptionalCustomCoding
+ public var createdAt: Date?
+ /// The description of the data source that is deleted.
+ public let description: String?
+ /// The ID of the Amazon DataZone domain in which the data source is deleted.
+ public let domainId: String
+ /// The enable setting of the data source that specifies whether the data source is enabled or disabled.
+ public let enableSetting: EnableSetting?
+ /// The ID of the environment associated with this data source.
+ public let environmentId: String
+ /// Specifies the error message that is returned if the operation cannot be successfully completed.
+ public let errorMessage: DataSourceErrorMessage?
+ /// The ID of the data source that is deleted.
+ public let id: String
+ /// The timestamp of when the data source was last run.
+ @OptionalCustomCoding
+ public var lastRunAt: Date?
+ /// Specifies the error message that is returned if the operation cannot be successfully completed.
+ public let lastRunErrorMessage: DataSourceErrorMessage?
+ /// The status of the last run of this data source.
+ public let lastRunStatus: DataSourceRunStatus?
+ /// The name of the data source that is deleted.
+ public let name: String + /// The ID of the project in which this data source exists and from which it's deleted. + public let projectId: String + /// Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog. + public let publishOnImport: Bool? + /// The schedule of runs for this data source. + public let schedule: ScheduleConfiguration? + /// The status of this data source. + public let status: DataSourceStatus? + /// The type of this data source. + public let type: String? + /// The timestamp of when this data source was updated. + @OptionalCustomCoding + public var updatedAt: Date? + + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { + self.assetFormsOutput = assetFormsOutput + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.enableSetting = enableSetting + self.environmentId = environmentId + self.errorMessage = errorMessage + self.id = id + self.lastRunAt = lastRunAt + self.lastRunErrorMessage = lastRunErrorMessage + self.lastRunStatus = lastRunStatus + self.name = name + self.projectId = projectId + self.publishOnImport = publishOnImport + self.schedule = schedule + self.status = status + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case assetFormsOutput = "assetFormsOutput" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case enableSetting = "enableSetting" + case environmentId = "environmentId" + case errorMessage = "errorMessage" + case id = "id" + case lastRunAt = "lastRunAt" + case lastRunErrorMessage = "lastRunErrorMessage" + case lastRunStatus = "lastRunStatus" + case name = "name" + case projectId = "projectId" + case publishOnImport = "publishOnImport" + case schedule = "schedule" + case status = "status" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct DeleteDomainInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "clientToken", location: .querystring("clientToken")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The identifier of the Amazon Web Services domain that is to be deleted. + public let identifier: String + + public init(clientToken: String? = DeleteDomainInput.idempotencyToken(), identifier: String) { + self.clientToken = clientToken + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteDomainOutput: AWSDecodableShape { + /// The status of the domain. 
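+ // Illustrative usage sketch, not part of the generated model. DeleteDataSourceInput defaults its
+ // clientToken to DeleteDataSourceInput.idempotencyToken(), so resending the same token keeps a
+ // retried delete idempotent, and DeleteDataSourceOutput echoes back the deleted data source.
+ // Reusing the `dataZone` service object from the earlier sketch; the method name
+ // `deleteDataSource` is an assumption:
+ //
+ //     let token = DataZone.DeleteDataSourceInput.idempotencyToken()
+ //     let deleted = try await dataZone.deleteDataSource(.init(
+ //         clientToken: token,
+ //         domainIdentifier: "dzd_1a2b3c4d5e",
+ //         identifier: "abc123"
+ //     ))
+ //     print("deleted \(deleted.name) from project \(deleted.projectId)")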
+ public let status: DomainStatus + + public init(status: DomainStatus) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + + public struct DeleteEnvironmentBlueprintConfigurationInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentBlueprintIdentifier", location: .uri("environmentBlueprintIdentifier")) + ] + + /// The ID of the Amazon DataZone domain in which the blueprint configuration is deleted. + public let domainIdentifier: String + /// The ID of the blueprint the configuration of which is deleted. + public let environmentBlueprintIdentifier: String + + public init(domainIdentifier: String, environmentBlueprintIdentifier: String) { + self.domainIdentifier = domainIdentifier + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteEnvironmentBlueprintConfigurationOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteEnvironmentInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the environment is deleted. + public let domainIdentifier: String + /// The identifier of the environment that is to be deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteEnvironmentProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the environment profile is deleted. + public let domainIdentifier: String + /// The ID of the environment profile that is deleted. 
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteFormTypeInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "formTypeIdentifier", location: .uri("formTypeIdentifier")) + ] + + /// The ID of the Amazon DataZone domain in which the metadata form type is deleted. + public let domainIdentifier: String + /// The ID of the metadata form type that is deleted. + public let formTypeIdentifier: String + + public init(domainIdentifier: String, formTypeIdentifier: String) { + self.domainIdentifier = domainIdentifier + self.formTypeIdentifier = formTypeIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, max: 385) + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, min: 1) + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteFormTypeOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteGlossaryInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the business glossary is deleted. + public let domainIdentifier: String + /// The ID of the business glossary that is deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteGlossaryOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteGlossaryTermInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the business glossary term is deleted. + public let domainIdentifier: String + /// The ID of the business glossary term that is deleted. 
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteGlossaryTermOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteListingInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + public let domainIdentifier: String + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteListingOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteProjectInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the project is deleted. + public let domainIdentifier: String + /// The identifier of the project that is to be deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteProjectMembershipInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "projectIdentifier", location: .uri("projectIdentifier")) + ] + + /// The ID of the Amazon DataZone domain where project membership is deleted. + public let domainIdentifier: String + /// The project member whose project membership is deleted. + public let member: Member + /// The ID of the Amazon DataZone project the membership to which is deleted. 
+ public let projectIdentifier: String
+
+ public init(domainIdentifier: String, member: Member, projectIdentifier: String) {
+ self.domainIdentifier = domainIdentifier
+ self.member = member
+ self.projectIdentifier = projectIdentifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case member = "member"
+ }
+ }
+
+ public struct DeleteProjectMembershipOutput: AWSDecodableShape {
+ public init() {}
+ }
+
+ public struct DeleteProjectOutput: AWSDecodableShape {
+ public init() {}
+ }
+
+ public struct DeleteSubscriptionGrantInput: AWSEncodableShape {
+ public static var _encoding = [
+ AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+ AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+ ]
+
+ /// The ID of the Amazon DataZone domain where the subscription grant is deleted.
+ public let domainIdentifier: String
+ /// The ID of the subscription grant that is deleted.
+ public let identifier: String
+
+ public init(domainIdentifier: String, identifier: String) {
+ self.domainIdentifier = domainIdentifier
+ self.identifier = identifier
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+ try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+ }
+
+ private enum CodingKeys: CodingKey {}
+ }
+
+ public struct DeleteSubscriptionGrantOutput: AWSDecodableShape {
+ /// The assets for which the subscription grant that is deleted gave access.
+ public let assets: [SubscribedAsset]?
+ /// The timestamp of when the subscription grant that is deleted was created.
+ public let createdAt: Date
+ /// The Amazon DataZone user who created the subscription grant that is deleted.
+ public let createdBy: String
+ /// The ID of the Amazon DataZone domain in which the subscription grant is deleted.
+ public let domainId: String
+ /// The entity to which the subscription is deleted.
+ public let grantedEntity: GrantedEntity
+ /// The ID of the subscription grant that is deleted.
+ public let id: String
+ /// The status of the subscription grant that is deleted.
+ public let status: SubscriptionGrantOverallStatus
+ /// The identifier of the subscription whose subscription grant is to be deleted.
+ public let subscriptionId: String?
+ /// The ID of the subscription target associated with the subscription grant that is deleted.
+ public let subscriptionTargetId: String
+ /// The timestamp of when the subscription grant that is deleted was updated.
+ public let updatedAt: Date
+ /// The Amazon DataZone user who updated the subscription grant that is deleted.
+ public let updatedBy: String?
+
+ public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String?
= nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case assets = "assets" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case grantedEntity = "grantedEntity" + case id = "id" + case status = "status" + case subscriptionId = "subscriptionId" + case subscriptionTargetId = "subscriptionTargetId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct DeleteSubscriptionRequestInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the subscription request is deleted. + public let domainIdentifier: String + /// The ID of the subscription request that is deleted. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteSubscriptionTargetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .uri("environmentIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the subscription target is deleted. + public let domainIdentifier: String + /// The ID of the Amazon DataZone environment in which the subscription target is deleted. + public let environmentIdentifier: String + /// The ID of the subscription target that is deleted. + public let identifier: String + + public init(domainIdentifier: String, environmentIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct Deployment: AWSDecodableShape { + /// The identifier of the last deployment of the environment. + public let deploymentId: String? + /// The status of the last deployment of the environment. + public let deploymentStatus: DeploymentStatus? + /// The type of the last deployment of the environment. + public let deploymentType: DeploymentType? + /// The failure reason of the last deployment of the environment. 
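+ // Illustrative usage sketch, not part of the generated model. DeleteSubscriptionTargetInput
+ // encodes all three of its members into the request URI (domain, environment and target IDs),
+ // so every identifier is required up front. Reusing the `dataZone` service object from the
+ // earlier sketch; the method name `deleteSubscriptionTarget` is an assumption:
+ //
+ //     _ = try await dataZone.deleteSubscriptionTarget(.init(
+ //         domainIdentifier: "dzd_1a2b3c4d5e",
+ //         environmentIdentifier: "env123",
+ //         identifier: "target123"
+ //     ))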
+ public let failureReason: EnvironmentError?
+ /// Specifies whether the last deployment of the environment is complete.
+ public let isDeploymentComplete: Bool?
+ /// The messages of the last deployment of the environment.
+ public let messages: [String]?
+
+ public init(deploymentId: String? = nil, deploymentStatus: DeploymentStatus? = nil, deploymentType: DeploymentType? = nil, failureReason: EnvironmentError? = nil, isDeploymentComplete: Bool? = nil, messages: [String]? = nil) {
+ self.deploymentId = deploymentId
+ self.deploymentStatus = deploymentStatus
+ self.deploymentType = deploymentType
+ self.failureReason = failureReason
+ self.isDeploymentComplete = isDeploymentComplete
+ self.messages = messages
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case deploymentId = "deploymentId"
+ case deploymentStatus = "deploymentStatus"
+ case deploymentType = "deploymentType"
+ case failureReason = "failureReason"
+ case isDeploymentComplete = "isDeploymentComplete"
+ case messages = "messages"
+ }
+ }
+
+ public struct DeploymentProperties: AWSDecodableShape {
+ /// The end timeout of the environment blueprint deployment.
+ public let endTimeoutMinutes: Int?
+ /// The start timeout of the environment blueprint deployment.
+ public let startTimeoutMinutes: Int?
+
+ public init(endTimeoutMinutes: Int? = nil, startTimeoutMinutes: Int? = nil) {
+ self.endTimeoutMinutes = endTimeoutMinutes
+ self.startTimeoutMinutes = startTimeoutMinutes
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case endTimeoutMinutes = "endTimeoutMinutes"
+ case startTimeoutMinutes = "startTimeoutMinutes"
+ }
+ }
+
+ public struct DetailedGlossaryTerm: AWSDecodableShape {
+ /// The name of a glossary term attached to the inventory asset.
+ public let name: String?
+ /// The short description of a glossary term attached to the inventory asset.
+ public let shortDescription: String?
+
+ public init(name: String? = nil, shortDescription: String? = nil) {
+ self.name = name
+ self.shortDescription = shortDescription
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case name = "name"
+ case shortDescription = "shortDescription"
+ }
+ }
+
+ public struct DomainSummary: AWSDecodableShape {
+ /// The ARN of the Amazon DataZone domain.
+ public let arn: String
+ /// A timestamp of when an Amazon DataZone domain was created.
+ public let createdAt: Date
+ /// A description of an Amazon DataZone domain.
+ public let description: String?
+ /// The ID of the Amazon DataZone domain.
+ public let id: String
+ /// A timestamp of when an Amazon DataZone domain was last updated.
+ public let lastUpdatedAt: Date?
+ /// The identifier of the Amazon Web Services account that manages the domain.
+ public let managedAccountId: String
+ /// A name of an Amazon DataZone domain.
+ public let name: String
+ /// The data portal URL for the Amazon DataZone domain.
+ public let portalUrl: String?
+ /// The status of the Amazon DataZone domain.
+ public let status: DomainStatus
+
+ public init(arn: String, createdAt: Date, description: String? = nil, id: String, lastUpdatedAt: Date? = nil, managedAccountId: String, name: String, portalUrl: String?
= nil, status: DomainStatus) { + self.arn = arn + self.createdAt = createdAt + self.description = description + self.id = id + self.lastUpdatedAt = lastUpdatedAt + self.managedAccountId = managedAccountId + self.name = name + self.portalUrl = portalUrl + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + case description = "description" + case id = "id" + case lastUpdatedAt = "lastUpdatedAt" + case managedAccountId = "managedAccountId" + case name = "name" + case portalUrl = "portalUrl" + case status = "status" + } + } + + public struct EnvironmentBlueprintConfigurationItem: AWSDecodableShape { + /// The timestamp of when an environment blueprint was created. + public let createdAt: Date? + /// The identifier of the Amazon DataZone domain in which an environment blueprint exists. + public let domainId: String + /// The enabled Amazon Web Services Regions specified in a blueprint configuration. + public let enabledRegions: [String]? + /// The identifier of the environment blueprint. + public let environmentBlueprintId: String + /// The ARN of the manage access role specified in the environment blueprint configuration. + public let manageAccessRoleArn: String? + /// The ARN of the provisioning role specified in the environment blueprint configuration. + public let provisioningRoleArn: String? + /// The regional parameters of the environment blueprint. + public let regionalParameters: [String: [String: String]]? + /// The timestamp of when the environment blueprint was updated. + public let updatedAt: Date? + + public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? = nil) { + self.createdAt = createdAt + self.domainId = domainId + self.enabledRegions = enabledRegions + self.environmentBlueprintId = environmentBlueprintId + self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningRoleArn = provisioningRoleArn + self.regionalParameters = regionalParameters + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case domainId = "domainId" + case enabledRegions = "enabledRegions" + case environmentBlueprintId = "environmentBlueprintId" + case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningRoleArn = "provisioningRoleArn" + case regionalParameters = "regionalParameters" + case updatedAt = "updatedAt" + } + } + + public struct EnvironmentBlueprintSummary: AWSDecodableShape { + /// The timestamp of when an environment blueprint was created. + public let createdAt: Date? + /// The description of a blueprint. + public let description: String? + /// The identifier of the blueprint. + public let id: String + /// The name of the blueprint. + public let name: String + /// The provider of the blueprint. + public let provider: String + /// The provisioning properties of the blueprint. + public let provisioningProperties: ProvisioningProperties + /// The timestamp of when the blueprint was enabled. + public let updatedAt: Date? + + public init(createdAt: Date? = nil, description: String? = nil, id: String, name: String, provider: String, provisioningProperties: ProvisioningProperties, updatedAt: Date? 
= nil) { + self.createdAt = createdAt + self.description = description + self.id = id + self.name = name + self.provider = provider + self.provisioningProperties = provisioningProperties + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case description = "description" + case id = "id" + case name = "name" + case provider = "provider" + case provisioningProperties = "provisioningProperties" + case updatedAt = "updatedAt" + } + } + + public struct EnvironmentError: AWSDecodableShape { + /// The error code for the failure reason for the environment deployment. + public let code: String? + /// The error message for the failure reason for the environment deployment. + public let message: String + + public init(code: String? = nil, message: String) { + self.code = code + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case code = "code" + case message = "message" + } + } + + public struct EnvironmentParameter: AWSEncodableShape { + /// The name of an environment profile parameter. + public let name: String? + /// The value of an environment profile parameter. + public let value: String? + + public init(name: String? = nil, value: String? = nil) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + + public struct EnvironmentProfileSummary: AWSDecodableShape { + /// The identifier of an Amazon Web Services account in which an environment profile exists. + public let awsAccountId: String? + /// The Amazon Web Services Region in which an environment profile exists. + public let awsAccountRegion: String? + /// The timestamp of when an environment profile was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the environment profile. + public let createdBy: String + /// The description of the environment profile. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the environment profile exists. + public let domainId: String + /// The identifier of a blueprint with which an environment profile is created. + public let environmentBlueprintId: String + /// The identifier of the environment profile. + public let id: String + /// The name of the environment profile. + public let name: String + /// The identifier of a project in which an environment profile exists. + public let projectId: String? + /// The timestamp of when the environment profile was updated. + public let updatedAt: Date? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentBlueprintId: String, id: String, name: String, projectId: String? = nil, updatedAt: Date? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.environmentBlueprintId = environmentBlueprintId + self.id = id + self.name = name + self.projectId = projectId + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case environmentBlueprintId = "environmentBlueprintId" + case id = "id" + case name = "name" + case projectId = "projectId" + case updatedAt = "updatedAt" + } + } + + public struct EnvironmentSummary: AWSDecodableShape { + /// The identifier of the Amazon Web Services account in which an environment exists. + public let awsAccountId: String? + /// The Amazon Web Services Region in which an environment exists. + public let awsAccountRegion: String? + /// The timestamp of when the environment was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the environment. + public let createdBy: String + /// The description of the environment. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the environment exists. + public let domainId: String + /// The identifier of the environment profile with which the environment was created. + public let environmentProfileId: String + /// The identifier of the environment. + public let id: String? + /// The name of the environment. + public let name: String + /// The identifier of the project in which the environment exists. + public let projectId: String + /// The provider of the environment. + public let provider: String + /// The status of the environment. + public let status: EnvironmentStatus? + /// The timestamp of when the environment was updated. + public let updatedAt: Date? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentProfileId: String, id: String? = nil, name: String, projectId: String, provider: String, status: EnvironmentStatus? = nil, updatedAt: Date? = nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.environmentProfileId = environmentProfileId + self.id = id + self.name = name + self.projectId = projectId + self.provider = provider + self.status = status + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case environmentProfileId = "environmentProfileId" + case id = "id" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case status = "status" + case updatedAt = "updatedAt" + } + } + + public struct FailureCause: AWSEncodableShape & AWSDecodableShape { + /// The description of the error message. + public let message: String? + + public init(message: String? 
= nil) {
+ self.message = message
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case message = "message"
+ }
+ }
+
+ public struct Filter: AWSEncodableShape {
+ /// A search filter attribute in Amazon DataZone.
+ public let attribute: String
+ /// A search filter value in Amazon DataZone.
+ public let value: String
+
+ public init(attribute: String, value: String) {
+ self.attribute = attribute
+ self.value = value
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.attribute, name: "attribute", parent: name, max: 128)
+ try self.validate(self.attribute, name: "attribute", parent: name, min: 1)
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case attribute = "attribute"
+ case value = "value"
+ }
+ }
+
+ public struct FilterExpression: AWSEncodableShape & AWSDecodableShape {
+ /// The search filter expression.
+ public let expression: String
+ /// The search filter expression type.
+ public let type: FilterExpressionType
+
+ public init(expression: String, type: FilterExpressionType) {
+ self.expression = expression
+ self.type = type
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case expression = "expression"
+ case type = "type"
+ }
+ }
+
+ public struct FormEntryInput: AWSEncodableShape {
+ /// Specifies whether a form entry is required.
+ public let required: Bool?
+ /// The type ID of the form entry.
+ public let typeIdentifier: String
+ /// The type revision of the form entry.
+ public let typeRevision: String
+
+ public init(required: Bool? = nil, typeIdentifier: String, typeRevision: String) {
+ self.required = required
+ self.typeIdentifier = typeIdentifier
+ self.typeRevision = typeRevision
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, max: 385)
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, min: 1)
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$")
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64)
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1)
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case required = "required"
+ case typeIdentifier = "typeIdentifier"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct FormEntryOutput: AWSDecodableShape {
+ /// Specifies whether a form entry is required.
+ public let required: Bool?
+ /// The name of the type of the form entry.
+ public let typeName: String
+ /// The type revision of the form entry.
+ public let typeRevision: String
+
+ public init(required: Bool? = nil, typeName: String, typeRevision: String) {
+ self.required = required
+ self.typeName = typeName
+ self.typeRevision = typeRevision
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case required = "required"
+ case typeName = "typeName"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct FormInput: AWSEncodableShape {
+ /// The content of the metadata form.
+ public let content: String?
+ /// The name of the metadata form.
+ public let formName: String
+ /// The ID of the metadata form type.
+ public let typeIdentifier: String?
+ /// The revision of the metadata form type.
+ public let typeRevision: String?
+
+ public init(content: String? = nil, formName: String, typeIdentifier: String? = nil, typeRevision: String?
= nil) {
+ self.content = content
+ self.formName = formName
+ self.typeIdentifier = typeIdentifier
+ self.typeRevision = typeRevision
+ }
+
+ public func validate(name: String) throws {
+ try self.validate(self.formName, name: "formName", parent: name, max: 128)
+ try self.validate(self.formName, name: "formName", parent: name, min: 1)
+ try self.validate(self.formName, name: "formName", parent: name, pattern: "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$")
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, max: 385)
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, min: 1)
+ try self.validate(self.typeIdentifier, name: "typeIdentifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$")
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, max: 64)
+ try self.validate(self.typeRevision, name: "typeRevision", parent: name, min: 1)
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case content = "content"
+ case formName = "formName"
+ case typeIdentifier = "typeIdentifier"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct FormOutput: AWSDecodableShape {
+ /// The content of the metadata form.
+ public let content: String?
+ /// The name of the metadata form.
+ public let formName: String
+ /// The name of the metadata form type.
+ public let typeName: String?
+ /// The revision of the metadata form type.
+ public let typeRevision: String?
+
+ public init(content: String? = nil, formName: String, typeName: String? = nil, typeRevision: String? = nil) {
+ self.content = content
+ self.formName = formName
+ self.typeName = typeName
+ self.typeRevision = typeRevision
+ }
+
+ private enum CodingKeys: String, CodingKey {
+ case content = "content"
+ case formName = "formName"
+ case typeName = "typeName"
+ case typeRevision = "typeRevision"
+ }
+ }
+
+ public struct FormTypeData: AWSDecodableShape {
+ /// The timestamp of when the metadata form type was created.
+ public let createdAt: Date?
+ /// The Amazon DataZone user who created the metadata form type.
+ public let createdBy: String?
+ /// The description of the metadata form type.
+ public let description: String?
+ /// The identifier of the Amazon DataZone domain in which the form type exists.
+ public let domainId: String
+ /// The imports specified in the form type.
+ public let imports: [Import]?
+ /// The model of the form type.
+ public let model: Model?
+ /// The name of the form type.
+ public let name: String
+ /// The identifier of the Amazon DataZone domain in which the form type was originally created.
+ public let originDomainId: String?
+ /// The identifier of the project in which the form type was originally created.
+ public let originProjectId: String?
+ /// The identifier of the project that owns the form type.
+ public let owningProjectId: String?
+ /// The revision of the form type.
+ public let revision: String
+ /// The status of the form type.
+ public let status: FormTypeStatus?
+
+ public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, imports: [Import]? = nil, model: Model? = nil, name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String, status: FormTypeStatus?
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.imports = imports + self.model = model + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case imports = "imports" + case model = "model" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case status = "status" + } + } + + public struct GetAssetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "revision", location: .querystring("revision")) + ] + + /// The ID of the Amazon DataZone domain to which the asset belongs. + public let domainIdentifier: String + /// The ID of the Amazon DataZone asset. + public let identifier: String + /// The revision of the Amazon DataZone asset. + public let revision: String? + + public init(domainIdentifier: String, identifier: String, revision: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAssetOutput: AWSDecodableShape { + /// The timestamp of when the asset was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the asset. + public let createdBy: String? + /// The description of the Amazon DataZone asset. + public let description: String? + /// The ID of the Amazon DataZone domain to which the asset belongs. + public let domainId: String + public let externalIdentifier: String? + /// The timestamp of when the first revision of the asset was created. + public let firstRevisionCreatedAt: Date? + /// The Amazon DataZone user who created the first revision of the asset. + public let firstRevisionCreatedBy: String? + /// The metadata forms attached to the asset. + public let formsOutput: [FormOutput] + /// The business glossary terms attached to the asset. + public let glossaryTerms: [String]? + /// The ID of the asset. + public let id: String + public let listing: AssetListingDetails? + /// The name of the asset. + public let name: String + /// The ID of the project that owns the asset. + public let owningProjectId: String + /// The read-only metadata forms attached to the asset. + public let readOnlyFormsOutput: [FormOutput]? + /// The revision of the asset. + public let revision: String + /// The ID of the asset type. + public let typeIdentifier: String + /// The revision of the asset type. + public let typeRevision: String + + public init(createdAt: Date? = nil, createdBy: String? 
= nil, description: String? = nil, domainId: String, externalIdentifier: String? = nil, firstRevisionCreatedAt: Date? = nil, firstRevisionCreatedBy: String? = nil, formsOutput: [FormOutput], glossaryTerms: [String]? = nil, id: String, listing: AssetListingDetails? = nil, name: String, owningProjectId: String, readOnlyFormsOutput: [FormOutput]? = nil, revision: String, typeIdentifier: String, typeRevision: String) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.externalIdentifier = externalIdentifier + self.firstRevisionCreatedAt = firstRevisionCreatedAt + self.firstRevisionCreatedBy = firstRevisionCreatedBy + self.formsOutput = formsOutput + self.glossaryTerms = glossaryTerms + self.id = id + self.listing = listing + self.name = name + self.owningProjectId = owningProjectId + self.readOnlyFormsOutput = readOnlyFormsOutput + self.revision = revision + self.typeIdentifier = typeIdentifier + self.typeRevision = typeRevision + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case externalIdentifier = "externalIdentifier" + case firstRevisionCreatedAt = "firstRevisionCreatedAt" + case firstRevisionCreatedBy = "firstRevisionCreatedBy" + case formsOutput = "formsOutput" + case glossaryTerms = "glossaryTerms" + case id = "id" + case listing = "listing" + case name = "name" + case owningProjectId = "owningProjectId" + case readOnlyFormsOutput = "readOnlyFormsOutput" + case revision = "revision" + case typeIdentifier = "typeIdentifier" + case typeRevision = "typeRevision" + } + } + + public struct GetAssetTypeInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "revision", location: .querystring("revision")) + ] + + /// The ID of the Amazon DataZone domain in which the asset type exists. + public let domainIdentifier: String + /// The ID of the asset type. + public let identifier: String + /// The revision of the asset type. + public let revision: String? + + public init(domainIdentifier: String, identifier: String, revision: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, max: 385) + try self.validate(self.identifier, name: "identifier", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAssetTypeOutput: AWSDecodableShape { + /// The timestamp of when the asset type was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the asset type. + public let createdBy: String? + /// The description of the asset type. + public let description: String? + /// The ID of the Amazon DataZone domain in which the asset type exists. 
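+ // Illustrative usage sketch, not part of the generated model. GetAssetInput takes the domain and
+ // asset IDs plus an optional revision (sent as a query string), and GetAssetOutput returns the
+ // attached metadata forms as FormOutput values. Reusing the `dataZone` service object from the
+ // earlier sketch; the method name `getAsset` is an assumption:
+ //
+ //     let asset = try await dataZone.getAsset(.init(
+ //         domainIdentifier: "dzd_1a2b3c4d5e",
+ //         identifier: "abc123",
+ //         revision: "2"
+ //     ))
+ //     for form in asset.formsOutput {
+ //         print("\(form.formName) [\(form.typeName ?? "?") rev \(form.typeRevision ?? "?")]")
+ //     }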
+ public let domainId: String + /// The metadata forms attached to the asset type. + public let formsOutput: [String: FormEntryOutput] + /// The name of the asset type. + public let name: String + /// The ID of the Amazon DataZone domain in which the asset type was originally created. + public let originDomainId: String? + /// The ID of the Amazon DataZone project in which the asset type was originally created. + public let originProjectId: String? + /// The ID of the Amazon DataZone project that owns the asset type. + public let owningProjectId: String + /// The revision of the asset type. + public let revision: String + /// The timestamp of when the asset type was updated. + public let updatedAt: Date? + /// The Amazon DataZone user that updated the asset type. + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, formsOutput: [String: FormEntryOutput], name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String, revision: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.formsOutput = formsOutput + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case formsOutput = "formsOutput" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetDataSourceInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the data source exists. + public let domainIdentifier: String + /// The ID of the Amazon DataZone data source. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetDataSourceOutput: AWSDecodableShape { + /// The metadata forms attached to the assets created by this data source. + public let assetFormsOutput: [FormOutput]? + /// The configuration of the data source. + public let configuration: DataSourceConfigurationOutput? + /// The timestamp of when the data source was created. + @OptionalCustomCoding + public var createdAt: Date? + /// The description of the data source. + public let description: String? + /// The ID of the Amazon DataZone domain in which the data source exists. + public let domainId: String + /// Specifies whether this data source is enabled or not. 
+ public let enableSetting: EnableSetting?
+ /// The ID of the environment where this data source creates and publishes assets.
+ public let environmentId: String
+ /// Specifies the error message that is returned if the operation cannot be successfully completed.
+ public let errorMessage: DataSourceErrorMessage?
+ /// The ID of the data source.
+ public let id: String
+ /// The number of assets created by the data source during its last run.
+ public let lastRunAssetCount: Int?
+ /// The timestamp of the last run of the data source.
+ @OptionalCustomCoding
+ public var lastRunAt: Date?
+ /// Specifies the error message that is returned if the operation cannot be successfully completed.
+ public let lastRunErrorMessage: DataSourceErrorMessage?
+ /// The status of the last run of the data source.
+ public let lastRunStatus: DataSourceRunStatus?
+ /// The name of the data source.
+ public let name: String
+ /// The ID of the project where the data source creates and publishes assets.
+ public let projectId: String
+ /// Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.
+ public let publishOnImport: Bool?
+ public let recommendation: RecommendationConfiguration?
+ /// The schedule of the data source runs.
+ public let schedule: ScheduleConfiguration?
+ /// The status of the data source.
+ public let status: DataSourceStatus?
+ /// The type of the data source.
+ public let type: String?
+ /// The timestamp of when the data source was updated.
+ @OptionalCustomCoding
+ public var updatedAt: Date?
+
+ public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAssetCount: Int? = nil, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date?
= nil) { + self.assetFormsOutput = assetFormsOutput + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.enableSetting = enableSetting + self.environmentId = environmentId + self.errorMessage = errorMessage + self.id = id + self.lastRunAssetCount = lastRunAssetCount + self.lastRunAt = lastRunAt + self.lastRunErrorMessage = lastRunErrorMessage + self.lastRunStatus = lastRunStatus + self.name = name + self.projectId = projectId + self.publishOnImport = publishOnImport + self.recommendation = recommendation + self.schedule = schedule + self.status = status + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case assetFormsOutput = "assetFormsOutput" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case enableSetting = "enableSetting" + case environmentId = "environmentId" + case errorMessage = "errorMessage" + case id = "id" + case lastRunAssetCount = "lastRunAssetCount" + case lastRunAt = "lastRunAt" + case lastRunErrorMessage = "lastRunErrorMessage" + case lastRunStatus = "lastRunStatus" + case name = "name" + case projectId = "projectId" + case publishOnImport = "publishOnImport" + case recommendation = "recommendation" + case schedule = "schedule" + case status = "status" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct GetDataSourceRunInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the domain in which this data source run was performed. + public let domainIdentifier: String + /// The ID of the data source run. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetDataSourceRunOutput: AWSDecodableShape { + /// The timestamp of when the data source run was created. + @CustomCoding + public var createdAt: Date + /// The configuration snapshot of the data source run. + public let dataSourceConfigurationSnapshot: String? + /// The ID of the data source for this data source run. + public let dataSourceId: String + /// The ID of the domain in which this data source run was performed. + public let domainId: String + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let errorMessage: DataSourceErrorMessage? + /// The ID of the data source run. + public let id: String + /// The ID of the project in which this data source run occured. + public let projectId: String + /// The asset statistics from this data source run. + public let runStatisticsForAssets: RunStatisticsForAssets? + /// The timestamp of when this data source run started. + @OptionalCustomCoding + public var startedAt: Date? + /// The status of this data source run. + public let status: DataSourceRunStatus + /// The timestamp of when this data source run stopped. 
+        @OptionalCustomCoding
+        public var stoppedAt: Date?
+        /// The type of this data source run.
+        public let type: DataSourceRunType
+        /// The timestamp of when this data source run was updated.
+        @CustomCoding
+        public var updatedAt: Date
+
+        public init(createdAt: Date, dataSourceConfigurationSnapshot: String? = nil, dataSourceId: String, domainId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, projectId: String, runStatisticsForAssets: RunStatisticsForAssets? = nil, startedAt: Date? = nil, status: DataSourceRunStatus, stoppedAt: Date? = nil, type: DataSourceRunType, updatedAt: Date) {
+            self.createdAt = createdAt
+            self.dataSourceConfigurationSnapshot = dataSourceConfigurationSnapshot
+            self.dataSourceId = dataSourceId
+            self.domainId = domainId
+            self.errorMessage = errorMessage
+            self.id = id
+            self.projectId = projectId
+            self.runStatisticsForAssets = runStatisticsForAssets
+            self.startedAt = startedAt
+            self.status = status
+            self.stoppedAt = stoppedAt
+            self.type = type
+            self.updatedAt = updatedAt
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case createdAt = "createdAt"
+            case dataSourceConfigurationSnapshot = "dataSourceConfigurationSnapshot"
+            case dataSourceId = "dataSourceId"
+            case domainId = "domainId"
+            case errorMessage = "errorMessage"
+            case id = "id"
+            case projectId = "projectId"
+            case runStatisticsForAssets = "runStatisticsForAssets"
+            case startedAt = "startedAt"
+            case status = "status"
+            case stoppedAt = "stoppedAt"
+            case type = "type"
+            case updatedAt = "updatedAt"
+        }
+    }
+
+    public struct GetDomainInput: AWSEncodableShape {
+        public static var _encoding = [
+            AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+        ]
+
+        /// The identifier of the specified Amazon DataZone domain.
+        public let identifier: String
+
+        public init(identifier: String) {
+            self.identifier = identifier
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+        }
+
+        private enum CodingKeys: CodingKey {}
+    }
+
+    public struct GetDomainOutput: AWSDecodableShape {
+        /// The ARN of the specified Amazon DataZone domain.
+        public let arn: String?
+        /// The timestamp of when the Amazon DataZone domain was created.
+        public let createdAt: Date?
+        /// The description of the Amazon DataZone domain.
+        public let description: String?
+        /// The domain execution role with which the Amazon DataZone domain is created.
+        public let domainExecutionRole: String
+        /// The identifier of the specified Amazon DataZone domain.
+        public let id: String
+        /// The identifier of the Amazon Web Services Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data.
+        public let kmsKeyIdentifier: String?
+        /// The timestamp of when the Amazon DataZone domain was last updated.
+        public let lastUpdatedAt: Date?
+        /// The name of the Amazon DataZone domain.
+        public let name: String?
+        /// The URL of the data portal for this Amazon DataZone domain.
+        public let portalUrl: String?
+        /// The single sign-on option of the specified Amazon DataZone domain.
+        public let singleSignOn: SingleSignOn?
+        /// The status of the specified Amazon DataZone domain.
+        public let status: DomainStatus
+        /// The tags specified for the Amazon DataZone domain.
+        public let tags: [String: String]?
+
+        public init(arn: String? = nil, createdAt: Date? = nil, description: String? = nil, domainExecutionRole: String, id: String, kmsKeyIdentifier: String? = nil, lastUpdatedAt: Date? = nil, name: String? = nil, portalUrl: String? = nil, singleSignOn: SingleSignOn? = nil, status: DomainStatus, tags: [String: String]? = nil) {
+            self.arn = arn
+            self.createdAt = createdAt
+            self.description = description
+            self.domainExecutionRole = domainExecutionRole
+            self.id = id
+            self.kmsKeyIdentifier = kmsKeyIdentifier
+            self.lastUpdatedAt = lastUpdatedAt
+            self.name = name
+            self.portalUrl = portalUrl
+            self.singleSignOn = singleSignOn
+            self.status = status
+            self.tags = tags
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+            case createdAt = "createdAt"
+            case description = "description"
+            case domainExecutionRole = "domainExecutionRole"
+            case id = "id"
+            case kmsKeyIdentifier = "kmsKeyIdentifier"
+            case lastUpdatedAt = "lastUpdatedAt"
+            case name = "name"
+            case portalUrl = "portalUrl"
+            case singleSignOn = "singleSignOn"
+            case status = "status"
+            case tags = "tags"
+        }
+    }
+
+    public struct GetEnvironmentBlueprintConfigurationInput: AWSEncodableShape {
+        public static var _encoding = [
+            AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+            AWSMemberEncoding(label: "environmentBlueprintIdentifier", location: .uri("environmentBlueprintIdentifier"))
+        ]
+
+        /// The ID of the Amazon DataZone domain where this blueprint exists.
+        public let domainIdentifier: String
+        /// The ID of the blueprint.
+        public let environmentBlueprintIdentifier: String
+
+        public init(domainIdentifier: String, environmentBlueprintIdentifier: String) {
+            self.domainIdentifier = domainIdentifier
+            self.environmentBlueprintIdentifier = environmentBlueprintIdentifier
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+            try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+        }
+
+        private enum CodingKeys: CodingKey {}
+    }
+
+    public struct GetEnvironmentBlueprintConfigurationOutput: AWSDecodableShape {
+        /// The timestamp of when this blueprint was created.
+        public let createdAt: Date?
+        /// The ID of the Amazon DataZone domain where this blueprint exists.
+        public let domainId: String
+        /// The Amazon Web Services regions in which this blueprint is enabled.
+        public let enabledRegions: [String]?
+        /// The ID of the blueprint.
+        public let environmentBlueprintId: String
+        /// The ARN of the manage access role with which this blueprint is created.
+        public let manageAccessRoleArn: String?
+        /// The ARN of the provisioning role with which this blueprint is created.
+        public let provisioningRoleArn: String?
+        /// The regional parameters of the blueprint.
+        public let regionalParameters: [String: [String: String]]?
+        /// The timestamp of when this blueprint was updated.
+        public let updatedAt: Date?
+
+        public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date?
= nil) { + self.createdAt = createdAt + self.domainId = domainId + self.enabledRegions = enabledRegions + self.environmentBlueprintId = environmentBlueprintId + self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningRoleArn = provisioningRoleArn + self.regionalParameters = regionalParameters + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case domainId = "domainId" + case enabledRegions = "enabledRegions" + case environmentBlueprintId = "environmentBlueprintId" + case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningRoleArn = "provisioningRoleArn" + case regionalParameters = "regionalParameters" + case updatedAt = "updatedAt" + } + } + + public struct GetEnvironmentBlueprintInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the domain in which this blueprint exists. + public let domainIdentifier: String + /// The ID of this Amazon DataZone blueprint. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEnvironmentBlueprintOutput: AWSDecodableShape { + /// A timestamp of when this blueprint was created. + public let createdAt: Date? + /// The deployment properties of this Amazon DataZone blueprint. + public let deploymentProperties: DeploymentProperties? + /// The description of this Amazon DataZone blueprint. + public let description: String? + /// The glossary terms attached to this Amazon DataZone blueprint. + public let glossaryTerms: [String]? + /// The ID of this Amazon DataZone blueprint. + public let id: String + /// The name of this Amazon DataZone blueprint. + public let name: String + /// The provider of this Amazon DataZone blueprint. + public let provider: String + /// The provisioning properties of this Amazon DataZone blueprint. + public let provisioningProperties: ProvisioningProperties + /// The timestamp of when this blueprint was updated. + public let updatedAt: Date? + /// The user parameters of this blueprint. + public let userParameters: [CustomParameter]? + + public init(createdAt: Date? = nil, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, glossaryTerms: [String]? = nil, id: String, name: String, provider: String, provisioningProperties: ProvisioningProperties, updatedAt: Date? = nil, userParameters: [CustomParameter]? 
= nil) { + self.createdAt = createdAt + self.deploymentProperties = deploymentProperties + self.description = description + self.glossaryTerms = glossaryTerms + self.id = id + self.name = name + self.provider = provider + self.provisioningProperties = provisioningProperties + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case deploymentProperties = "deploymentProperties" + case description = "description" + case glossaryTerms = "glossaryTerms" + case id = "id" + case name = "name" + case provider = "provider" + case provisioningProperties = "provisioningProperties" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct GetEnvironmentInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain where the environment exists. + public let domainIdentifier: String + /// The ID of the Amazon DataZone environment. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEnvironmentOutput: AWSDecodableShape { + /// The ID of the Amazon Web Services account where the environment exists. + public let awsAccountId: String? + /// The Amazon Web Services region where the environment exists. + public let awsAccountRegion: String? + /// The timestamp of when the environment was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the environment. + public let createdBy: String + /// The deployment properties of the environment. + public let deploymentProperties: DeploymentProperties? + /// The description of the environment. + public let description: String? + /// The ID of the Amazon DataZone domain where the environment exists. + public let domainId: String + /// The actions of the environment. + public let environmentActions: [ConfigurableEnvironmentAction]? + /// The blueprint with which the environment is created. + public let environmentBlueprintId: String? + /// The ID of the environment profile with which the environment is created. + public let environmentProfileId: String + /// The business glossary terms that can be used in this environment. + public let glossaryTerms: [String]? + /// The ID of the environment. + public let id: String? + /// The details of the last deployment of the environment. + public let lastDeployment: Deployment? + /// The name of the environment. + public let name: String + /// The ID of the Amazon DataZone project in which this environment is created. + public let projectId: String + /// The provider of this Amazon DataZone environment. + public let provider: String + /// The provisioned resources of this Amazon DataZone environment. + public let provisionedResources: [Resource]? + /// The provisioning properties of this Amazon DataZone environment. + public let provisioningProperties: ProvisioningProperties? 
+ /// The status of this Amazon DataZone environment. + public let status: EnvironmentStatus? + /// The timestamp of when this environment was updated. + public let updatedAt: Date? + /// The user parameters of this Amazon DataZone environment. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.deploymentProperties = deploymentProperties + self.description = description + self.domainId = domainId + self.environmentActions = environmentActions + self.environmentBlueprintId = environmentBlueprintId + self.environmentProfileId = environmentProfileId + self.glossaryTerms = glossaryTerms + self.id = id + self.lastDeployment = lastDeployment + self.name = name + self.projectId = projectId + self.provider = provider + self.provisionedResources = provisionedResources + self.provisioningProperties = provisioningProperties + self.status = status + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case deploymentProperties = "deploymentProperties" + case description = "description" + case domainId = "domainId" + case environmentActions = "environmentActions" + case environmentBlueprintId = "environmentBlueprintId" + case environmentProfileId = "environmentProfileId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case lastDeployment = "lastDeployment" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case provisionedResources = "provisionedResources" + case provisioningProperties = "provisioningProperties" + case status = "status" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct GetEnvironmentProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which this environment profile exists. + public let domainIdentifier: String + /// The ID of the environment profile. 
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEnvironmentProfileOutput: AWSDecodableShape { + /// The ID of the Amazon Web Services account where this environment profile exists. + public let awsAccountId: String? + /// The Amazon Web Services region where this environment profile exists. + public let awsAccountRegion: String? + /// The timestamp of when this environment profile was created. + public let createdAt: Date? + /// The Amazon DataZone user who created this environment profile. + public let createdBy: String + /// The description of the environment profile. + public let description: String? + /// The ID of the Amazon DataZone domain in which this environment profile exists. + public let domainId: String + /// The ID of the blueprint with which this environment profile is created. + public let environmentBlueprintId: String + /// The ID of the environment profile. + public let id: String + /// The name of the environment profile. + public let name: String + /// The ID of the Amazon DataZone project in which this environment profile is created. + public let projectId: String? + /// The timestamp of when this environment profile was upated. + public let updatedAt: Date? + /// The user parameters of the environment profile. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentBlueprintId: String, id: String, name: String, projectId: String? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.environmentBlueprintId = environmentBlueprintId + self.id = id + self.name = name + self.projectId = projectId + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case environmentBlueprintId = "environmentBlueprintId" + case id = "id" + case name = "name" + case projectId = "projectId" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct GetFormTypeInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "formTypeIdentifier", location: .uri("formTypeIdentifier")), + AWSMemberEncoding(label: "revision", location: .querystring("revision")) + ] + + /// The ID of the Amazon DataZone domain in which this metadata form type exists. + public let domainIdentifier: String + /// The ID of the metadata form type. + public let formTypeIdentifier: String + /// The revision of this metadata form type. 
+ public let revision: String? + + public init(domainIdentifier: String, formTypeIdentifier: String, revision: String? = nil) { + self.domainIdentifier = domainIdentifier + self.formTypeIdentifier = formTypeIdentifier + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, max: 385) + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, min: 1) + try self.validate(self.formTypeIdentifier, name: "formTypeIdentifier", parent: name, pattern: "^(?!\\.)[\\w\\.]*\\w$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetFormTypeOutput: AWSDecodableShape { + /// The timestamp of when this metadata form type was created. + public let createdAt: Date? + /// The Amazon DataZone user who created this metadata form type. + public let createdBy: String? + /// The description of the metadata form type. + public let description: String? + /// The ID of the Amazon DataZone domain in which this metadata form type exists. + public let domainId: String + /// The imports of the metadata form type. + public let imports: [Import]? + /// The model of the metadata form type. + public let model: Model + /// The name of the metadata form type. + public let name: String + /// The ID of the Amazon DataZone domain in which the metadata form type was originally created. + public let originDomainId: String? + /// The ID of the project in which this metadata form type was originally created. + public let originProjectId: String? + /// The ID of the project that owns this metadata form type. + public let owningProjectId: String? + /// The revision of the metadata form type. + public let revision: String + /// The status of the metadata form type. + public let status: FormTypeStatus? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, imports: [Import]? = nil, model: Model, name: String, originDomainId: String? = nil, originProjectId: String? = nil, owningProjectId: String? = nil, revision: String, status: FormTypeStatus? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.imports = imports + self.model = model + self.name = name + self.originDomainId = originDomainId + self.originProjectId = originProjectId + self.owningProjectId = owningProjectId + self.revision = revision + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case imports = "imports" + case model = "model" + case name = "name" + case originDomainId = "originDomainId" + case originProjectId = "originProjectId" + case owningProjectId = "owningProjectId" + case revision = "revision" + case status = "status" + } + } + + public struct GetGlossaryInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which this business glossary exists. 
+ public let domainIdentifier: String + /// The ID of the business glossary. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetGlossaryOutput: AWSDecodableShape { + /// The timestamp of when this business glossary was created. + public let createdAt: Date? + /// The Amazon DataZone user who created this business glossary. + public let createdBy: String? + /// The description of the business glossary. + public let description: String? + /// The ID of the Amazon DataZone domain in which this business glossary exists. + public let domainId: String + /// The ID of the business glossary. + public let id: String + /// The name of the business glossary. + public let name: String + /// The ID of the project that owns this business glossary. + public let owningProjectId: String + /// The status of the business glossary. + public let status: GlossaryStatus + /// The timestamp of when the business glossary was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the business glossary. + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, id: String, name: String, owningProjectId: String, status: GlossaryStatus, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.id = id + self.name = name + self.owningProjectId = owningProjectId + self.status = status + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case id = "id" + case name = "name" + case owningProjectId = "owningProjectId" + case status = "status" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetGlossaryTermInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which this business glossary term exists. + public let domainIdentifier: String + /// The ID of the business glossary term. + public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetGlossaryTermOutput: AWSDecodableShape { + /// The timestamp of when the business glossary term was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the business glossary. 
+        public let createdBy: String?
+        /// The ID of the Amazon DataZone domain in which this business glossary term exists.
+        public let domainId: String
+        /// The ID of the business glossary to which this term belongs.
+        public let glossaryId: String
+        /// The ID of the business glossary term.
+        public let id: String
+        /// The long description of the business glossary term.
+        public let longDescription: String?
+        /// The name of the business glossary term.
+        public let name: String
+        /// The short description of the business glossary term.
+        public let shortDescription: String?
+        /// The status of the business glossary term.
+        public let status: GlossaryTermStatus
+        /// The relations of the business glossary term.
+        public let termRelations: TermRelations?
+        /// The timestamp of when the business glossary term was updated.
+        public let updatedAt: Date?
+        /// The Amazon DataZone user who updated the business glossary term.
+        public let updatedBy: String?
+
+        public init(createdAt: Date? = nil, createdBy: String? = nil, domainId: String, glossaryId: String, id: String, longDescription: String? = nil, name: String, shortDescription: String? = nil, status: GlossaryTermStatus, termRelations: TermRelations? = nil, updatedAt: Date? = nil, updatedBy: String? = nil) {
+            self.createdAt = createdAt
+            self.createdBy = createdBy
+            self.domainId = domainId
+            self.glossaryId = glossaryId
+            self.id = id
+            self.longDescription = longDescription
+            self.name = name
+            self.shortDescription = shortDescription
+            self.status = status
+            self.termRelations = termRelations
+            self.updatedAt = updatedAt
+            self.updatedBy = updatedBy
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case createdAt = "createdAt"
+            case createdBy = "createdBy"
+            case domainId = "domainId"
+            case glossaryId = "glossaryId"
+            case id = "id"
+            case longDescription = "longDescription"
+            case name = "name"
+            case shortDescription = "shortDescription"
+            case status = "status"
+            case termRelations = "termRelations"
+            case updatedAt = "updatedAt"
+            case updatedBy = "updatedBy"
+        }
+    }
+
+    public struct GetGroupProfileInput: AWSEncodableShape {
+        public static var _encoding = [
+            AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+            AWSMemberEncoding(label: "groupIdentifier", location: .uri("groupIdentifier"))
+        ]
+
+        /// The identifier of the Amazon DataZone domain in which the group profile exists.
+        public let domainIdentifier: String
+        /// The identifier of the group profile.
+        public let groupIdentifier: String
+
+        public init(domainIdentifier: String, groupIdentifier: String) {
+            self.domainIdentifier = domainIdentifier
+            self.groupIdentifier = groupIdentifier
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+            try self.validate(self.groupIdentifier, name: "groupIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]+)")
+        }
+
+        private enum CodingKeys: CodingKey {}
+    }
+
+    public struct GetGroupProfileOutput: AWSDecodableShape {
+        /// The identifier of the Amazon DataZone domain in which the group profile exists.
+        public let domainId: String?
+        /// The name of the group for which the specified group profile exists.
+        public let groupName: String?
+        /// The identifier of the group profile.
+        public let id: String?
+        /// The status of the group profile.
+ public let status: GroupProfileStatus? + + public init(domainId: String? = nil, groupName: String? = nil, id: String? = nil, status: GroupProfileStatus? = nil) { + self.domainId = domainId + self.groupName = groupName + self.id = id + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case groupName = "groupName" + case id = "id" + case status = "status" + } + } + + public struct GetIamPortalLoginUrlInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// the ID of the Amazon DataZone domain the data portal of which you want to get. + public let domainIdentifier: String + + public init(domainIdentifier: String) { + self.domainIdentifier = domainIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetIamPortalLoginUrlOutput: AWSDecodableShape { + /// The data portal URL of the specified Amazon DataZone domain. + public let authCodeUrl: String? + /// The ID of the user profile. + public let userProfileId: String + + public init(authCodeUrl: String? = nil, userProfileId: String) { + self.authCodeUrl = authCodeUrl + self.userProfileId = userProfileId + } + + private enum CodingKeys: String, CodingKey { + case authCodeUrl = "authCodeUrl" + case userProfileId = "userProfileId" + } + } + + public struct GetListingInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "listingRevision", location: .querystring("listingRevision")) + ] + + public let domainIdentifier: String + public let identifier: String + public let listingRevision: String? + + public init(domainIdentifier: String, identifier: String, listingRevision: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.listingRevision = listingRevision + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.listingRevision, name: "listingRevision", parent: name, max: 64) + try self.validate(self.listingRevision, name: "listingRevision", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetListingOutput: AWSDecodableShape { + public let createdAt: Date? + /// The Amazon DataZone user who created the listing. + public let createdBy: String? + public let description: String? + public let domainId: String + public let id: String + public let item: ListingItem? + public let listingRevision: String + public let name: String? + public let status: ListingStatus? + /// The timestamp of when the listing was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the listing. + public let updatedBy: String? + + public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, id: String, item: ListingItem? = nil, listingRevision: String, name: String? = nil, status: ListingStatus? = nil, updatedAt: Date? 
= nil, updatedBy: String? = nil) {
+            self.createdAt = createdAt
+            self.createdBy = createdBy
+            self.description = description
+            self.domainId = domainId
+            self.id = id
+            self.item = item
+            self.listingRevision = listingRevision
+            self.name = name
+            self.status = status
+            self.updatedAt = updatedAt
+            self.updatedBy = updatedBy
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case createdAt = "createdAt"
+            case createdBy = "createdBy"
+            case description = "description"
+            case domainId = "domainId"
+            case id = "id"
+            case item = "item"
+            case listingRevision = "listingRevision"
+            case name = "name"
+            case status = "status"
+            case updatedAt = "updatedAt"
+            case updatedBy = "updatedBy"
+        }
+    }
+
+    public struct GetProjectInput: AWSEncodableShape {
+        public static var _encoding = [
+            AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+            AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+        ]
+
+        /// The ID of the Amazon DataZone domain in which the project exists.
+        public let domainIdentifier: String
+        /// The ID of the project.
+        public let identifier: String
+
+        public init(domainIdentifier: String, identifier: String) {
+            self.domainIdentifier = domainIdentifier
+            self.identifier = identifier
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+            try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+        }
+
+        private enum CodingKeys: CodingKey {}
+    }
+
+    public struct GetProjectOutput: AWSDecodableShape {
+        /// The timestamp of when the project was created.
+        public let createdAt: Date?
+        /// The Amazon DataZone user who created the project.
+        public let createdBy: String
+        /// The description of the project.
+        public let description: String?
+        /// The ID of the Amazon DataZone domain in which the project exists.
+        public let domainId: String
+        /// The business glossary terms that can be used in the project.
+        public let glossaryTerms: [String]?
+        /// The ID of the project.
+        public let id: String
+        /// The timestamp of when the project was last updated.
+        public let lastUpdatedAt: Date?
+        /// The name of the project.
+        public let name: String
+
+        public init(createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, glossaryTerms: [String]? = nil, id: String, lastUpdatedAt: Date? = nil, name: String) {
+            self.createdAt = createdAt
+            self.createdBy = createdBy
+            self.description = description
+            self.domainId = domainId
+            self.glossaryTerms = glossaryTerms
+            self.id = id
+            self.lastUpdatedAt = lastUpdatedAt
+            self.name = name
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case createdAt = "createdAt"
+            case createdBy = "createdBy"
+            case description = "description"
+            case domainId = "domainId"
+            case glossaryTerms = "glossaryTerms"
+            case id = "id"
+            case lastUpdatedAt = "lastUpdatedAt"
+            case name = "name"
+        }
+    }
+
+    public struct GetSubscriptionGrantInput: AWSEncodableShape {
+        public static var _encoding = [
+            AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")),
+            AWSMemberEncoding(label: "identifier", location: .uri("identifier"))
+        ]
+
+        /// The ID of the Amazon DataZone domain in which the subscription grant exists.
+        public let domainIdentifier: String
+        /// The ID of the subscription grant.
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSubscriptionGrantOutput: AWSDecodableShape { + /// The assets for which the subscription grant is created. + public let assets: [SubscribedAsset]? + /// The timestamp of when the subscription grant is created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription grant. + public let createdBy: String + /// The ID of the Amazon DataZone domain in which the subscription grant exists. + public let domainId: String + /// The entity to which the subscription is granted. + public let grantedEntity: GrantedEntity + /// The ID of the subscription grant. + public let id: String + /// The status of the subscription grant. + public let status: SubscriptionGrantOverallStatus + /// The identifier of the subscription. + public let subscriptionId: String? + /// The subscription target ID associated with the subscription grant. + public let subscriptionTargetId: String + /// The timestamp of when the subscription grant was upated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription grant. + public let updatedBy: String? + + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case assets = "assets" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case grantedEntity = "grantedEntity" + case id = "id" + case status = "status" + case subscriptionId = "subscriptionId" + case subscriptionTargetId = "subscriptionTargetId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetSubscriptionInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the subscription exists. + public let domainIdentifier: String + /// The ID of the subscription. 
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSubscriptionOutput: AWSDecodableShape { + /// The timestamp of when the subscription was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription. + public let createdBy: String + /// The ID of the Amazon DataZone domain in which the subscription exists. + public let domainId: String + /// The ID of the subscription. + public let id: String + /// The retain permissions of the subscription. + public let retainPermissions: Bool? + /// The status of the subscription. + public let status: SubscriptionStatus + public let subscribedListing: SubscribedListing + /// The principal that owns the subscription. + public let subscribedPrincipal: SubscribedPrincipal + /// The ID of the subscription request. + public let subscriptionRequestId: String? + /// The timestamp of when the subscription was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, domainId: String, id: String, retainPermissions: Bool? = nil, status: SubscriptionStatus, subscribedListing: SubscribedListing, subscribedPrincipal: SubscribedPrincipal, subscriptionRequestId: String? = nil, updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.id = id + self.retainPermissions = retainPermissions + self.status = status + self.subscribedListing = subscribedListing + self.subscribedPrincipal = subscribedPrincipal + self.subscriptionRequestId = subscriptionRequestId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case id = "id" + case retainPermissions = "retainPermissions" + case status = "status" + case subscribedListing = "subscribedListing" + case subscribedPrincipal = "subscribedPrincipal" + case subscriptionRequestId = "subscriptionRequestId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetSubscriptionRequestDetailsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the Amazon DataZone domain in which to get the subscription request details. + public let domainIdentifier: String + /// The identifier of the subscription request the details of which to get. 
+ public let identifier: String + + public init(domainIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSubscriptionRequestDetailsOutput: AWSDecodableShape { + /// The timestamp of when the specified subscription request was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription request. + public let createdBy: String + /// The decision comment of the subscription request. + public let decisionComment: String? + /// The Amazon DataZone domain of the subscription request. + public let domainId: String + /// The identifier of the subscription request. + public let id: String + /// The reason for the subscription request. + public let requestReason: String + /// The identifier of the Amazon DataZone user who reviewed the subscription request. + public let reviewerId: String? + /// The status of the subscription request. + public let status: SubscriptionRequestStatus + /// The subscribed listings in the subscription request. + public let subscribedListings: [SubscribedListing] + /// The subscribed principals in the subscription request. + public let subscribedPrincipals: [SubscribedPrincipal] + /// The timestamp of when the subscription request was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription request. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, decisionComment: String? = nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.decisionComment = decisionComment + self.domainId = domainId + self.id = id + self.requestReason = requestReason + self.reviewerId = reviewerId + self.status = status + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case decisionComment = "decisionComment" + case domainId = "domainId" + case id = "id" + case requestReason = "requestReason" + case reviewerId = "reviewerId" + case status = "status" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetSubscriptionTargetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .uri("environmentIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The ID of the Amazon DataZone domain in which the subscription target exists. + public let domainIdentifier: String + /// The ID of the environment associated with the subscription target. 
+        public let environmentIdentifier: String
+        /// The ID of the subscription target.
+        public let identifier: String
+
+        public init(domainIdentifier: String, environmentIdentifier: String, identifier: String) {
+            self.domainIdentifier = domainIdentifier
+            self.environmentIdentifier = environmentIdentifier
+            self.identifier = identifier
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$")
+            try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+            try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$")
+        }
+
+        private enum CodingKeys: CodingKey {}
+    }
+
+    public struct GetSubscriptionTargetOutput: AWSDecodableShape {
+        /// The asset types associated with the subscription target.
+        public let applicableAssetTypes: [String]
+        /// The authorized principals of the subscription target.
+        public let authorizedPrincipals: [String]
+        /// The timestamp of when the subscription target was created.
+        public let createdAt: Date
+        /// The Amazon DataZone user who created the subscription target.
+        public let createdBy: String
+        /// The ID of the Amazon DataZone domain in which the subscription target exists.
+        public let domainId: String
+        /// The ID of the environment associated with the subscription target.
+        public let environmentId: String
+        /// The ID of the subscription target.
+        public let id: String
+        /// The manage access role with which the subscription target was created.
+        public let manageAccessRole: String
+        /// The name of the subscription target.
+        public let name: String
+        /// The ID of the project associated with the subscription target.
+        public let projectId: String
+        /// The provider of the subscription target.
+        public let provider: String
+        /// The configuration of the subscription target.
+        public let subscriptionTargetConfig: [SubscriptionTargetForm]
+        /// The type of the subscription target.
+        public let type: String
+        /// The timestamp of when the subscription target was updated.
+        public let updatedAt: Date?
+        /// The Amazon DataZone user who updated the subscription target.
+        public let updatedBy: String?
+
+        public init(applicableAssetTypes: [String], authorizedPrincipals: [String], createdAt: Date, createdBy: String, domainId: String, environmentId: String, id: String, manageAccessRole: String, name: String, projectId: String, provider: String, subscriptionTargetConfig: [SubscriptionTargetForm], type: String, updatedAt: Date? = nil, updatedBy: String?
= nil) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.manageAccessRole = manageAccessRole + self.name = name + self.projectId = projectId + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + self.type = type + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case manageAccessRole = "manageAccessRole" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + case type = "type" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GetUserProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "type", location: .querystring("type")), + AWSMemberEncoding(label: "userIdentifier", location: .uri("userIdentifier")) + ] + + /// the ID of the Amazon DataZone domain the data portal of which you want to get. + public let domainIdentifier: String + /// The type of the user profile. + public let type: UserProfileType? + /// The identifier of the user for which you want to get the user profile. + public let userIdentifier: String + + public init(domainIdentifier: String, type: UserProfileType? = nil, userIdentifier: String) { + self.domainIdentifier = domainIdentifier + self.type = type + self.userIdentifier = userIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.userIdentifier, name: "userIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[a-zA-Z_0-9+=,.@-]+$|^arn:aws:iam::\\d{12}:.+$)") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetUserProfileOutput: AWSDecodableShape { + public let details: UserProfileDetails? + /// the identifier of the Amazon DataZone domain of which you want to get the user profile. + public let domainId: String? + /// The identifier of the user profile. + public let id: String? + /// The status of the user profile. + public let status: UserProfileStatus? + /// The type of the user profile. + public let type: UserProfileType? + + public init(details: UserProfileDetails? = nil, domainId: String? = nil, id: String? = nil, status: UserProfileStatus? = nil, type: UserProfileType? = nil) { + self.details = details + self.domainId = domainId + self.id = id + self.status = status + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case details = "details" + case domainId = "domainId" + case id = "id" + case status = "status" + case type = "type" + } + } + + public struct GlossaryItem: AWSDecodableShape { + /// The timestamp of when the glossary was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the glossary. + public let createdBy: String? + /// The business glossary description. 
+        public let description: String?
+        /// The identifier of the Amazon DataZone domain in which the business glossary exists.
+        public let domainId: String
+        /// The identifier of the glossary.
+        public let id: String
+        /// The name of the glossary.
+        public let name: String
+        /// The identifier of the project that owns the business glossary.
+        public let owningProjectId: String
+        /// The business glossary status.
+        public let status: GlossaryStatus
+        /// The timestamp of when the business glossary was updated.
+        public let updatedAt: Date?
+        /// The Amazon DataZone user who updated the business glossary.
+        public let updatedBy: String?
+
+        public init(createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, domainId: String, id: String, name: String, owningProjectId: String, status: GlossaryStatus, updatedAt: Date? = nil, updatedBy: String? = nil) {
+            self.createdAt = createdAt
+            self.createdBy = createdBy
+            self.description = description
+            self.domainId = domainId
+            self.id = id
+            self.name = name
+            self.owningProjectId = owningProjectId
+            self.status = status
+            self.updatedAt = updatedAt
+            self.updatedBy = updatedBy
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case createdAt = "createdAt"
+            case createdBy = "createdBy"
+            case description = "description"
+            case domainId = "domainId"
+            case id = "id"
+            case name = "name"
+            case owningProjectId = "owningProjectId"
+            case status = "status"
+            case updatedAt = "updatedAt"
+            case updatedBy = "updatedBy"
+        }
+    }
+
+    public struct GlossaryTermItem: AWSDecodableShape {
+        /// The timestamp of when a business glossary term was created.
+        public let createdAt: Date?
+        /// The Amazon DataZone user who created the business glossary.
+        public let createdBy: String?
+        /// The identifier of the Amazon DataZone domain in which the business glossary exists.
+        public let domainId: String
+        /// The identifier of the business glossary to which the term belongs.
+        public let glossaryId: String
+        /// The identifier of the business glossary term.
+        public let id: String
+        /// The long description of the business glossary term.
+        public let longDescription: String?
+        /// The name of the business glossary term.
+        public let name: String
+        /// The short description of the business glossary term.
+        public let shortDescription: String?
+        /// The status of the business glossary term.
+        public let status: GlossaryTermStatus
+        /// The relations of the business glossary term.
+        public let termRelations: TermRelations?
+        /// The timestamp of when a business glossary term was updated.
+        public let updatedAt: Date?
+        /// The Amazon DataZone user who updated the business glossary term.
+        public let updatedBy: String?
+
+        public init(createdAt: Date? = nil, createdBy: String? = nil, domainId: String, glossaryId: String, id: String, longDescription: String? = nil, name: String, shortDescription: String? = nil, status: GlossaryTermStatus, termRelations: TermRelations? = nil, updatedAt: Date? = nil, updatedBy: String?
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.glossaryId = glossaryId + self.id = id + self.longDescription = longDescription + self.name = name + self.shortDescription = shortDescription + self.status = status + self.termRelations = termRelations + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case glossaryId = "glossaryId" + case id = "id" + case longDescription = "longDescription" + case name = "name" + case shortDescription = "shortDescription" + case status = "status" + case termRelations = "termRelations" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct GlueRunConfigurationInput: AWSEncodableShape { + /// The data access role included in the configuration details of the Amazon Web Services Glue data source. + public let dataAccessRole: String? + /// The relational filter configurations included in the configuration details of the Amazon Web Services Glue data source. + public let relationalFilterConfigurations: [RelationalFilterConfiguration] + + public init(dataAccessRole: String? = nil, relationalFilterConfigurations: [RelationalFilterConfiguration]) { + self.dataAccessRole = dataAccessRole + self.relationalFilterConfigurations = relationalFilterConfigurations + } + + private enum CodingKeys: String, CodingKey { + case dataAccessRole = "dataAccessRole" + case relationalFilterConfigurations = "relationalFilterConfigurations" + } + } + + public struct GlueRunConfigurationOutput: AWSDecodableShape { + /// The Amazon Web Services account ID included in the configuration details of the Amazon Web Services Glue data source. + public let accountId: String? + /// The data access role included in the configuration details of the Amazon Web Services Glue data source. + public let dataAccessRole: String? + /// The Amazon Web Services region included in the configuration details of the Amazon Web Services Glue data source. + public let region: String? + /// The relational filter configurations included in the configuration details of the Amazon Web Services Glue data source. + public let relationalFilterConfigurations: [RelationalFilterConfiguration] + + public init(accountId: String? = nil, dataAccessRole: String? = nil, region: String? = nil, relationalFilterConfigurations: [RelationalFilterConfiguration]) { + self.accountId = accountId + self.dataAccessRole = dataAccessRole + self.region = region + self.relationalFilterConfigurations = relationalFilterConfigurations + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case dataAccessRole = "dataAccessRole" + case region = "region" + case relationalFilterConfigurations = "relationalFilterConfigurations" + } + } + + public struct GroupDetails: AWSDecodableShape { + /// The identifier of the group in Amazon DataZone. + public let groupId: String + + public init(groupId: String) { + self.groupId = groupId + } + + private enum CodingKeys: String, CodingKey { + case groupId = "groupId" + } + } + + public struct GroupProfileSummary: AWSDecodableShape { + /// The ID of the Amazon DataZone domain of a group profile. + public let domainId: String? + /// The group name of a group profile. + public let groupName: String? + /// The ID of a group profile. + public let id: String? + /// The status of a group profile. + public let status: GroupProfileStatus? 
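// Editor's sketch (not part of the generated diff): how the GetUserProfileInput shape shown
// earlier in this hunk can be validated client-side. It assumes the usual Soto layout, i.e.
// these shapes are nested in `extension DataZone` and the companion generated *_api.swift
// file (not shown in this diff) exposes a matching getUserProfile(_:) client method.
import SotoDataZone

func checkGetUserProfileRequest() {
    // The userIdentifier pattern accepts a UUID, an SSO user name, or an IAM ARN.
    let input = DataZone.GetUserProfileInput(
        domainIdentifier: "dzd_0123456789abcdef",        // must match ^dzd[-_][a-zA-Z0-9_-]{1,36}$
        userIdentifier: "arn:aws:iam::123456789012:user/analyst"
    )
    do {
        try input.validate(name: "GetUserProfileInput")  // the same checks the client applies before sending
        print("request shape is valid")
    } catch {
        print("invalid request: \(error)")
    }
}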
+ + public init(domainId: String? = nil, groupName: String? = nil, id: String? = nil, status: GroupProfileStatus? = nil) { + self.domainId = domainId + self.groupName = groupName + self.id = id + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case groupName = "groupName" + case id = "id" + case status = "status" + } + } + + public struct IamUserProfileDetails: AWSDecodableShape { + /// The ARN of an IAM user profile in Amazon DataZone. + public let arn: String? + + public init(arn: String? = nil) { + self.arn = arn + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct Import: AWSDecodableShape { + /// The name of the import. + public let name: String + /// The revision of the import. + public let revision: String + + public init(name: String, revision: String) { + self.name = name + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case revision = "revision" + } + } + + public struct ListAssetRevisionsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")) + ] + + /// The identifier of the domain. + public let domainIdentifier: String + /// The identifier of the asset. + public let identifier: String + /// The maximum number of revisions to return in a single call to ListAssetRevisions. When the number of revisions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let maxResults: Int? + /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let nextToken: String? + + public init(domainIdentifier: String, identifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAssetRevisionsOutput: AWSDecodableShape { + /// The results of the ListAssetRevisions action. + public let items: [AssetRevision]? 
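// Editor's sketch (not part of the generated diff): manually paging through ListAssetRevisions
// with the Input/Output shapes defined here. `dataZone` is an assumed, already-configured
// DataZone service client, and listAssetRevisions(_:) is the client method that the companion
// generated *_api.swift file would expose for these shapes; the IDs are placeholders.
import SotoDataZone

func printAllAssetRevisions(_ dataZone: DataZone, domainId: String, assetId: String) async throws {
    var nextToken: String? = nil
    repeat {
        let page = try await dataZone.listAssetRevisions(DataZone.ListAssetRevisionsInput(
            domainIdentifier: domainId,
            identifier: assetId,
            maxResults: 50,            // upper bound enforced by validate()
            nextToken: nextToken
        ))
        for revision in page.items ?? [] {
            print(revision)            // each element is an AssetRevision summary
        }
        nextToken = page.nextToken     // nil once the last page has been returned
    } while nextToken != nil
}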
+ /// When the number of revisions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of revisions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListAssetRevisions to list the next set of revisions. + public let nextToken: String? + + public init(items: [AssetRevision]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListDataSourceRunActivitiesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "status", location: .querystring("status")) + ] + + /// The identifier of the Amazon DataZone domain in which to list data source run activities. + public let domainIdentifier: String + /// The identifier of the data source run. + public let identifier: String + /// The maximum number of activities to return in a single call to ListDataSourceRunActivities. When the number of activities to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDataSourceRunActivities to list the next set of activities. + public let maxResults: Int? + /// When the number of activities is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of activities, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSourceRunActivities to list the next set of activities. + public let nextToken: String? + /// The status of the data source run. + public let status: DataAssetActivityStatus? + + public init(domainIdentifier: String, identifier: String, maxResults: Int? = nil, nextToken: String? = nil, status: DataAssetActivityStatus? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDataSourceRunActivitiesOutput: AWSDecodableShape { + /// The results of the ListDataSourceRunActivities action. 
+ public let items: [DataSourceRunActivity] + /// When the number of activities is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of activities, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSourceRunActivities to list the next set of activities. + public let nextToken: String? + + public init(items: [DataSourceRunActivity], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListDataSourceRunsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "dataSourceIdentifier", location: .uri("dataSourceIdentifier")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "status", location: .querystring("status")) + ] + + /// The identifier of the data source. + public let dataSourceIdentifier: String + /// The identifier of the Amazon DataZone domain in which to invoke the ListDataSourceRuns action. + public let domainIdentifier: String + /// The maximum number of runs to return in a single call to ListDataSourceRuns. When the number of runs to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDataSourceRuns to list the next set of runs. + public let maxResults: Int? + /// When the number of runs is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of runs, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSourceRuns to list the next set of runs. + public let nextToken: String? + /// The status of the data source run. + public let status: DataSourceRunStatus? + + public init(dataSourceIdentifier: String, domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, status: DataSourceRunStatus? = nil) { + self.dataSourceIdentifier = dataSourceIdentifier + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.dataSourceIdentifier, name: "dataSourceIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDataSourceRunsOutput: AWSDecodableShape { + /// The results of the ListDataSourceRuns action.
+ public let items: [DataSourceRunSummary] + /// When the number of runs is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of runs, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSourceRuns to list the next set of runs. + public let nextToken: String? + + public init(items: [DataSourceRunSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListDataSourcesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .querystring("environmentIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "name", location: .querystring("name")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "projectIdentifier", location: .querystring("projectIdentifier")), + AWSMemberEncoding(label: "status", location: .querystring("status")), + AWSMemberEncoding(label: "type", location: .querystring("type")) + ] + + /// The identifier of the Amazon DataZone domain in which to list the data sources. + public let domainIdentifier: String + /// The identifier of the environment in which to list the data sources. + public let environmentIdentifier: String? + /// The maximum number of data sources to return in a single call to ListDataSources. When the number of data sources to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDataSources to list the next set of data sources. + public let maxResults: Int? + /// The name of the data source. + public let name: String? + /// When the number of data sources is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data sources, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSources to list the next set of data sources. + public let nextToken: String? + /// The identifier of the project in which to list data sources. + public let projectIdentifier: String + /// The status of the data source. + public let status: DataSourceStatus? + /// The type of the data source. + public let type: String? + + public init(domainIdentifier: String, environmentIdentifier: String? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, projectIdentifier: String, status: DataSourceStatus? = nil, type: String? 
= nil) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.maxResults = maxResults + self.name = name + self.nextToken = nextToken + self.projectIdentifier = projectIdentifier + self.status = status + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.type, name: "type", parent: name, max: 256) + try self.validate(self.type, name: "type", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDataSourcesOutput: AWSDecodableShape { + /// The results of the ListDataSources action. + public let items: [DataSourceSummary] + /// When the number of data sources is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of data sources, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDataSources to list the next set of data sources. + public let nextToken: String? + + public init(items: [DataSourceSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListDomainsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "status", location: .querystring("status")) + ] + + /// The maximum number of domains to return in a single call to ListDomains. When the number of domains to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListDomains to list the next set of domains. + public let maxResults: Int? + /// When the number of domains is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of domains, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDomains to list the next set of domains. + public let nextToken: String? + /// The status of the Amazon DataZone domains that you want to list. + public let status: DomainStatus? + + public init(maxResults: Int? = nil, nextToken: String? = nil, status: DomainStatus?
= nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDomainsOutput: AWSDecodableShape { + /// The results of the ListDomains action. + public let items: [DomainSummary] + /// When the number of domains is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of domains, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListDomains to list the next set of domains. + public let nextToken: String? + + public init(items: [DomainSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListEnvironmentBlueprintConfigurationsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")) + ] + + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The maximum number of blueprint configurations to return in a single call to ListEnvironmentBlueprintConfigurations. When the number of configurations to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironmentBlueprintConfigurations to list the next set of configurations. + public let maxResults: Int? + /// When the number of blueprint configurations is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of configurations, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentBlueprintConfigurations to list the next set of configurations. + public let nextToken: String? + + public init(domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListEnvironmentBlueprintConfigurationsOutput: AWSDecodableShape { + /// The results of the ListEnvironmentBlueprintConfigurations action. + public let items: [EnvironmentBlueprintConfigurationItem]? 
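// Editor's sketch (not part of the generated diff): collecting every DataZone domain by
// following nextToken. Note that ListDomainsInput caps maxResults at 25 in its validate(),
// unlike the 50-item limit used by most other List* inputs in this file. `dataZone` is an
// assumed, already-configured client and listDomains(_:) the generated method for these shapes.
import SotoDataZone

func listAllDomains(_ dataZone: DataZone) async throws -> [DataZone.DomainSummary] {
    var domains: [DataZone.DomainSummary] = []
    var token: String? = nil
    repeat {
        let page = try await dataZone.listDomains(DataZone.ListDomainsInput(maxResults: 25, nextToken: token))
        domains.append(contentsOf: page.items)   // items is non-optional on ListDomainsOutput
        token = page.nextToken
    } while token != nil
    return domains
}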
+ /// When the number of blueprint configurations is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of configurations, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentBlueprintConfigurations to list the next set of configurations. + public let nextToken: String? + + public init(items: [EnvironmentBlueprintConfigurationItem]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListEnvironmentBlueprintsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "managed", location: .querystring("managed")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "name", location: .querystring("name")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")) + ] + + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// Specifies whether the environment blueprint is managed by Amazon DataZone. + public let managed: Bool? + /// The maximum number of blueprints to return in a single call to ListEnvironmentBlueprints. When the number of blueprints to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironmentBlueprints to list the next set of blueprints. + public let maxResults: Int? + /// The name of the Amazon DataZone environment blueprint. + public let name: String? + /// When the number of blueprints in the environment is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of blueprints in the environment, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentBlueprints to list the next set of blueprints. + public let nextToken: String? + + public init(domainIdentifier: String, managed: Bool? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil) { + self.domainIdentifier = domainIdentifier + self.managed = managed + self.maxResults = maxResults + self.name = name + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListEnvironmentBlueprintsOutput: AWSDecodableShape { + /// The results of the ListEnvironmentBlueprints action.
+ public let items: [EnvironmentBlueprintSummary] + /// When the number of blueprints in the environment is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of blueprints in the environment, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentBlueprints to list the next set of blueprints. + public let nextToken: String? + + public init(items: [EnvironmentBlueprintSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListEnvironmentProfilesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "awsAccountId", location: .querystring("awsAccountId")), + AWSMemberEncoding(label: "awsAccountRegion", location: .querystring("awsAccountRegion")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentBlueprintIdentifier", location: .querystring("environmentBlueprintIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "name", location: .querystring("name")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "projectIdentifier", location: .querystring("projectIdentifier")) + ] + + /// The identifier of the Amazon Web Services account where you want to list environment profiles. + public let awsAccountId: String? + /// The Amazon Web Services region where you want to list environment profiles. + public let awsAccountRegion: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The identifier of the blueprint that was used to create the environment profiles that you want to list. + public let environmentBlueprintIdentifier: String? + /// The maximum number of environment profiles to return in a single call to ListEnvironmentProfiles. When the number of environment profiles to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironmentProfiles to list the next set of environment profiles. + public let maxResults: Int? + public let name: String? + /// When the number of environment profiles is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment profiles, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentProfiles to list the next set of environment profiles. + public let nextToken: String? + /// The identifier of the Amazon DataZone project. + public let projectIdentifier: String? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, domainIdentifier: String, environmentBlueprintIdentifier: String? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, projectIdentifier: String?
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.domainIdentifier = domainIdentifier + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier + self.maxResults = maxResults + self.name = name + self.nextToken = nextToken + self.projectIdentifier = projectIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListEnvironmentProfilesOutput: AWSDecodableShape { + /// The results of the ListEnvironmentProfiles action. + public let items: [EnvironmentProfileSummary] + /// When the number of environment profiles is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment profiles, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentProfiles to list the next set of environment profiles. + public let nextToken: String? + + public init(items: [EnvironmentProfileSummary], nextToken: String? 
= nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListEnvironmentsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "awsAccountId", location: .querystring("awsAccountId")), + AWSMemberEncoding(label: "awsAccountRegion", location: .querystring("awsAccountRegion")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentBlueprintIdentifier", location: .querystring("environmentBlueprintIdentifier")), + AWSMemberEncoding(label: "environmentProfileIdentifier", location: .querystring("environmentProfileIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "name", location: .querystring("name")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "projectIdentifier", location: .querystring("projectIdentifier")), + AWSMemberEncoding(label: "provider", location: .querystring("provider")), + AWSMemberEncoding(label: "status", location: .querystring("status")) + ] + + /// The identifier of the Amazon Web Services account where you want to list environments. + public let awsAccountId: String? + /// The Amazon Web Services region where you want to list environments. + public let awsAccountRegion: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The identifier of the Amazon DataZone blueprint. + public let environmentBlueprintIdentifier: String? + /// The identifier of the environment profile. + public let environmentProfileIdentifier: String? + /// The maximum number of environments to return in a single call to ListEnvironments. When the number of environments to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironments to list the next set of environments. + public let maxResults: Int? + public let name: String? + /// When the number of environments is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environments, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironments to list the next set of environments. + public let nextToken: String? + /// The identifier of the Amazon DataZone project. + public let projectIdentifier: String + /// The provider of the environment. + public let provider: String? + /// The status of the environments that you want to list. + public let status: EnvironmentStatus? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, domainIdentifier: String, environmentBlueprintIdentifier: String? = nil, environmentProfileIdentifier: String? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, projectIdentifier: String, provider: String? = nil, status: EnvironmentStatus? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.domainIdentifier = domainIdentifier + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier + self.environmentProfileIdentifier = environmentProfileIdentifier + self.maxResults = maxResults + self.name = name + self.nextToken = nextToken + self.projectIdentifier = projectIdentifier + self.provider = provider + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListEnvironmentsOutput: AWSDecodableShape { + /// The results of the ListEnvironments action. + public let items: [EnvironmentSummary] + /// When the number of environments is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environments, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironments to list the next set of environments. + public let nextToken: String? + + public init(items: [EnvironmentSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListNotificationsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "afterTimestamp", location: .querystring("afterTimestamp")), + AWSMemberEncoding(label: "beforeTimestamp", location: .querystring("beforeTimestamp")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "subjects", location: .querystring("subjects")), + AWSMemberEncoding(label: "taskStatus", location: .querystring("taskStatus")), + AWSMemberEncoding(label: "type", location: .querystring("type")) + ] + + /// The time after which you want to list notifications. + public let afterTimestamp: Date? + /// The time before which you want to list notifications. + public let beforeTimestamp: Date? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The maximum number of notifications to return in a single call to ListNotifications. 
When the number of notifications to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListNotifications to list the next set of notifications. + public let maxResults: Int? + /// When the number of notifications is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of notifications, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListNotifications to list the next set of notifications. + public let nextToken: String? + /// The subjects of notifications. + public let subjects: [String]? + /// The task status of notifications. + public let taskStatus: TaskStatus? + /// The type of notifications. + public let type: NotificationType + + public init(afterTimestamp: Date? = nil, beforeTimestamp: Date? = nil, domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, subjects: [String]? = nil, taskStatus: TaskStatus? = nil, type: NotificationType) { + self.afterTimestamp = afterTimestamp + self.beforeTimestamp = beforeTimestamp + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.subjects = subjects + self.taskStatus = taskStatus + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListNotificationsOutput: AWSDecodableShape { + /// When the number of notifications is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of notifications, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListNotifications to list the next set of notifications. + public let nextToken: String? + /// The results of the ListNotifications action. + public let notifications: [NotificationOutput]? + + public init(nextToken: String? = nil, notifications: [NotificationOutput]? = nil) { + self.nextToken = nextToken + self.notifications = notifications + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case notifications = "notifications" + } + } + + public struct ListProjectMembershipsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "projectIdentifier", location: .uri("projectIdentifier")), + AWSMemberEncoding(label: "sortBy", location: .querystring("sortBy")), + AWSMemberEncoding(label: "sortOrder", location: .querystring("sortOrder")) + ] + + /// The identifier of the Amazon DataZone domain in which you want to list project memberships. 
+ public let domainIdentifier: String + /// The maximum number of memberships to return in a single call to ListProjectMemberships. When the number of memberships to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListProjectMemberships to list the next set of memberships. + public let maxResults: Int? + /// When the number of memberships is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of memberships, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListProjectMemberships to list the next set of memberships. + public let nextToken: String? + /// The identifier of the project whose memberships you want to list. + public let projectIdentifier: String + /// The method by which you want to sort the project memberships. + public let sortBy: SortFieldProject? + /// The sort order of the project memberships. + public let sortOrder: SortOrder? + + public init(domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, projectIdentifier: String, sortBy: SortFieldProject? = nil, sortOrder: SortOrder? = nil) { + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.projectIdentifier = projectIdentifier + self.sortBy = sortBy + self.sortOrder = sortOrder + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.projectIdentifier, name: "projectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListProjectMembershipsOutput: AWSDecodableShape { + /// The members of the project. + public let members: [ProjectMember] + /// When the number of memberships is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of memberships, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListProjectMemberships to list the next set of memberships. + public let nextToken: String? + + public init(members: [ProjectMember], nextToken: String? 
= nil) { + self.members = members + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case members = "members" + case nextToken = "nextToken" + } + } + + public struct ListProjectsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "groupIdentifier", location: .querystring("groupIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "name", location: .querystring("name")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "userIdentifier", location: .querystring("userIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The identifier of a group. + public let groupIdentifier: String? + /// The maximum number of projects to return in a single call to ListProjects. When the number of projects to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListProjects to list the next set of projects. + public let maxResults: Int? + public let name: String? + /// When the number of projects is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of projects, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListProjects to list the next set of projects. + public let nextToken: String? + /// The identifier of the Amazon DataZone user. + public let userIdentifier: String? + + public init(domainIdentifier: String, groupIdentifier: String? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, userIdentifier: String? = nil) { + self.domainIdentifier = domainIdentifier + self.groupIdentifier = groupIdentifier + self.maxResults = maxResults + self.name = name + self.nextToken = nextToken + self.userIdentifier = userIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListProjectsOutput: AWSDecodableShape { + /// The results of the ListProjects action. + public let items: [ProjectSummary]? + /// When the number of projects is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of projects, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListProjects to list the next set of projects. + public let nextToken: String? + + public init(items: [ProjectSummary]? = nil, nextToken: String? 
= nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListSubscriptionGrantsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentId", location: .querystring("environmentId")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "sortBy", location: .querystring("sortBy")), + AWSMemberEncoding(label: "sortOrder", location: .querystring("sortOrder")), + AWSMemberEncoding(label: "subscribedListingId", location: .querystring("subscribedListingId")), + AWSMemberEncoding(label: "subscriptionId", location: .querystring("subscriptionId")), + AWSMemberEncoding(label: "subscriptionTargetId", location: .querystring("subscriptionTargetId")) + ] + + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The identifier of the Amazon DataZone environment. + public let environmentId: String? + /// The maximum number of subscription grants to return in a single call to ListSubscriptionGrants. When the number of subscription grants to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListSubscriptionGrants to list the next set of subscription grants. + public let maxResults: Int? + /// When the number of subscription grants is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription grants, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionGrants to list the next set of subscription grants. + public let nextToken: String? + /// Specifies the way of sorting the results of this action. + public let sortBy: SortKey? + /// Specifies the sort order of this action. + public let sortOrder: SortOrder? + /// The identifier of the subscribed listing. + public let subscribedListingId: String? + /// The identifier of the subscription. + public let subscriptionId: String? + /// The identifier of the subscription target. + public let subscriptionTargetId: String? + + public init(domainIdentifier: String, environmentId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil, subscribedListingId: String? = nil, subscriptionId: String? = nil, subscriptionTargetId: String? 
= nil) { + self.domainIdentifier = domainIdentifier + self.environmentId = environmentId + self.maxResults = maxResults + self.nextToken = nextToken + self.sortBy = sortBy + self.sortOrder = sortOrder + self.subscribedListingId = subscribedListingId + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentId, name: "environmentId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.subscribedListingId, name: "subscribedListingId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.subscriptionId, name: "subscriptionId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.subscriptionTargetId, name: "subscriptionTargetId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSubscriptionGrantsOutput: AWSDecodableShape { + /// The results of the ListSubscriptionGrants action. + public let items: [SubscriptionGrantSummary] + /// When the number of subscription grants is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription grants, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionGrants to list the next set of subscription grants. + public let nextToken: String? + + public init(items: [SubscriptionGrantSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListSubscriptionRequestsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "approverProjectId", location: .querystring("approverProjectId")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "owningProjectId", location: .querystring("owningProjectId")), + AWSMemberEncoding(label: "sortBy", location: .querystring("sortBy")), + AWSMemberEncoding(label: "sortOrder", location: .querystring("sortOrder")), + AWSMemberEncoding(label: "status", location: .querystring("status")), + AWSMemberEncoding(label: "subscribedListingId", location: .querystring("subscribedListingId")) + ] + + /// The identifier of the subscription request approver's project. + public let approverProjectId: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The maximum number of subscription requests to return in a single call to ListSubscriptionRequests. 
When the number of subscription requests to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListSubscriptionRequests to list the next set of subscription requests. + public let maxResults: Int? + /// When the number of subscription requests is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription requests, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionRequests to list the next set of subscription requests. + public let nextToken: String? + /// The identifier of the project for the subscription requests. + public let owningProjectId: String? + /// Specifies the way to sort the results of this action. + public let sortBy: SortKey? + /// Specifies the sort order for the results of this action. + public let sortOrder: SortOrder? + /// Specifies the status of the subscription requests. + public let status: SubscriptionRequestStatus? + /// The identifier of the subscribed listing. + public let subscribedListingId: String? + + public init(approverProjectId: String? = nil, domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, owningProjectId: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil, status: SubscriptionRequestStatus? = nil, subscribedListingId: String? = nil) { + self.approverProjectId = approverProjectId + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.owningProjectId = owningProjectId + self.sortBy = sortBy + self.sortOrder = sortOrder + self.status = status + self.subscribedListingId = subscribedListingId + } + + public func validate(name: String) throws { + try self.validate(self.approverProjectId, name: "approverProjectId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.owningProjectId, name: "owningProjectId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.subscribedListingId, name: "subscribedListingId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSubscriptionRequestsOutput: AWSDecodableShape { + /// The results of the ListSubscriptionRequests action. + public let items: [SubscriptionRequestSummary] + /// When the number of subscription requests is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription requests, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionRequests to list the next set of subscription requests. + public let nextToken: String? + + public init(items: [SubscriptionRequestSummary], nextToken: String? 
= nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListSubscriptionTargetsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .uri("environmentIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "sortBy", location: .querystring("sortBy")), + AWSMemberEncoding(label: "sortOrder", location: .querystring("sortOrder")) + ] + + /// The identifier of the Amazon DataZone domain where you want to list subscription targets. + public let domainIdentifier: String + /// The identifier of the environment where you want to list subscription targets. + public let environmentIdentifier: String + /// The maximum number of subscription targets to return in a single call to ListSubscriptionTargets. When the number of subscription targets to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListSubscriptionTargets to list the next set of subscription targets. + public let maxResults: Int? + /// When the number of subscription targets is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription targets, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionTargets to list the next set of subscription targets. + public let nextToken: String? + /// Specifies the way in which the results of this action are to be sorted. + public let sortBy: SortKey? + /// Specifies the sort order for the results of this action. + public let sortOrder: SortOrder? + + public init(domainIdentifier: String, environmentIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.sortBy = sortBy + self.sortOrder = sortOrder + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSubscriptionTargetsOutput: AWSDecodableShape { + /// The results of the ListSubscriptionTargets action. + public let items: [SubscriptionTargetSummary] + /// When the number of subscription targets is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscription targets, the response includes a pagination token named NextToken. 
You can specify this NextToken value in a subsequent call to ListSubscriptionTargets to list the next set of subscription targets. + public let nextToken: String? + + public init(items: [SubscriptionTargetSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListSubscriptionsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "approverProjectId", location: .querystring("approverProjectId")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "maxResults", location: .querystring("maxResults")), + AWSMemberEncoding(label: "nextToken", location: .querystring("nextToken")), + AWSMemberEncoding(label: "owningProjectId", location: .querystring("owningProjectId")), + AWSMemberEncoding(label: "sortBy", location: .querystring("sortBy")), + AWSMemberEncoding(label: "sortOrder", location: .querystring("sortOrder")), + AWSMemberEncoding(label: "status", location: .querystring("status")), + AWSMemberEncoding(label: "subscribedListingId", location: .querystring("subscribedListingId")), + AWSMemberEncoding(label: "subscriptionRequestIdentifier", location: .querystring("subscriptionRequestIdentifier")) + ] + + /// The identifier of the project for the subscription's approver. + public let approverProjectId: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The maximum number of subscriptions to return in a single call to ListSubscriptions. When the number of subscriptions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListSubscriptions to list the next set of Subscriptions. + public let maxResults: Int? + /// When the number of subscriptions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscriptions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptions to list the next set of subscriptions. + public let nextToken: String? + /// The identifier of the owning project. + public let owningProjectId: String? + /// Specifies the way in which the results of this action are to be sorted. + public let sortBy: SortKey? + /// Specifies the sort order for the results of this action. + public let sortOrder: SortOrder? + /// The status of the subscriptions that you want to list. + public let status: SubscriptionStatus? + /// The identifier of the subscribed listing for the subscriptions that you want to list. + public let subscribedListingId: String? + /// The identifier of the subscription request for the subscriptions that you want to list. + public let subscriptionRequestIdentifier: String? + + public init(approverProjectId: String? = nil, domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, owningProjectId: String? = nil, sortBy: SortKey? = nil, sortOrder: SortOrder? = nil, status: SubscriptionStatus? = nil, subscribedListingId: String? = nil, subscriptionRequestIdentifier: String? 
= nil) { + self.approverProjectId = approverProjectId + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.owningProjectId = owningProjectId + self.sortBy = sortBy + self.sortOrder = sortOrder + self.status = status + self.subscribedListingId = subscribedListingId + self.subscriptionRequestIdentifier = subscriptionRequestIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.approverProjectId, name: "approverProjectId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.owningProjectId, name: "owningProjectId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.subscribedListingId, name: "subscribedListingId", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.subscriptionRequestIdentifier, name: "subscriptionRequestIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSubscriptionsOutput: AWSDecodableShape { + /// The results of the ListSubscriptions action. + public let items: [SubscriptionSummary] + /// When the number of subscriptions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of subscriptions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptions to list the next set of subscriptions. + public let nextToken: String? + + public init(items: [SubscriptionSummary], nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "resourceArn", location: .uri("resourceArn")) + ] + + /// The ARN of the resource whose tags you want to list. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The tags of the specified resource. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct ListingRevision: AWSDecodableShape { + /// An identifier of a revision of an asset published in a Amazon DataZone catalog. + public let id: String + /// The details of a revision of an asset published in a Amazon DataZone catalog. 
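A minimal pagination sketch over the ListSubscriptions shapes above, assuming the generated DataZone client exposes a listSubscriptions(_:) method in Soto's usual operation-method style; the domain identifier and page size are placeholders.

import SotoDataZone

// Collects every subscription in a domain by carrying nextToken forward until the service stops returning one.
func collectAllSubscriptions(dataZone: DataZone, domainIdentifier: String) async throws -> [DataZone.SubscriptionSummary] {
    var summaries: [DataZone.SubscriptionSummary] = []
    var nextToken: String?
    repeat {
        let input = DataZone.ListSubscriptionsInput(
            domainIdentifier: domainIdentifier,
            maxResults: 50,            // validate(name:) above allows 1...50
            nextToken: nextToken
        )
        let page = try await dataZone.listSubscriptions(input)
        summaries.append(contentsOf: page.items)
        nextToken = page.nextToken     // nil once the last page has been returned
    } while nextToken != nil
    return summaries
}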
+ public let revision: String + + public init(id: String, revision: String) { + self.id = id + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case revision = "revision" + } + } + + public struct ListingRevisionInput: AWSEncodableShape { + /// An identifier of revision to be made to an asset published in a Amazon DataZone catalog. + public let identifier: String + /// The details of a revision to be made to an asset published in a Amazon DataZone catalog. + public let revision: String + + public init(identifier: String, revision: String) { + self.identifier = identifier + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case identifier = "identifier" + case revision = "revision" + } + } + + public struct NotificationOutput: AWSDecodableShape { + /// The action link included in the notification. + public let actionLink: String + /// The timestamp of when a notification was created. + public let creationTimestamp: Date + /// The identifier of a Amazon DataZone domain in which the notification exists. + public let domainIdentifier: String + /// The identifier of the notification. + public let identifier: String + /// The timestamp of when the notification was last updated. + public let lastUpdatedTimestamp: Date + /// The message included in the notification. + public let message: String + /// The metadata included in the notification. + public let metadata: [String: String]? + /// The status included in the notification. + public let status: TaskStatus? + /// The title of the notification. + public let title: String + /// The topic of the notification. + public let topic: Topic + /// The type of the notification. + public let type: NotificationType + + public init(actionLink: String, creationTimestamp: Date, domainIdentifier: String, identifier: String, lastUpdatedTimestamp: Date, message: String, metadata: [String: String]? = nil, status: TaskStatus? = nil, title: String, topic: Topic, type: NotificationType) { + self.actionLink = actionLink + self.creationTimestamp = creationTimestamp + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.message = message + self.metadata = metadata + self.status = status + self.title = title + self.topic = topic + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case actionLink = "actionLink" + case creationTimestamp = "creationTimestamp" + case domainIdentifier = "domainIdentifier" + case identifier = "identifier" + case lastUpdatedTimestamp = "lastUpdatedTimestamp" + case message = "message" + case metadata = "metadata" + case status = "status" + case title = "title" + case topic = "topic" + case type = "type" + } + } + + public struct NotificationResource: AWSDecodableShape { + /// The ID of the resource mentioned in a notification. + public let id: String + /// The name of the resource mentioned in a notification. + public let name: String? + /// The type of the resource mentioned in a notification. + public let type: NotificationResourceType + + public init(id: String, name: String? 
= nil, type: NotificationResourceType) { + self.id = id + self.name = name + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case name = "name" + case type = "type" + } + } + + public struct PredictionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The business name generation mechanism. + public let businessNameGeneration: BusinessNameGenerationConfiguration? + + public init(businessNameGeneration: BusinessNameGenerationConfiguration? = nil) { + self.businessNameGeneration = businessNameGeneration + } + + private enum CodingKeys: String, CodingKey { + case businessNameGeneration = "businessNameGeneration" + } + } + + public struct ProjectMember: AWSDecodableShape { + /// The designated role of a project member. + public let designation: UserDesignation + /// The membership details of a project member. + public let memberDetails: MemberDetails + + public init(designation: UserDesignation, memberDetails: MemberDetails) { + self.designation = designation + self.memberDetails = memberDetails + } + + private enum CodingKeys: String, CodingKey { + case designation = "designation" + case memberDetails = "memberDetails" + } + } + + public struct ProjectSummary: AWSDecodableShape { + /// The timestamp of when a project was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the project. + public let createdBy: String + /// The description of a project. + public let description: String? + /// The identifier of a Amazon DataZone domain where the project exists. + public let domainId: String + /// The identifier of a project. + public let id: String + /// The name of a project. + public let name: String + /// The timestamp of when the project was updated. + public let updatedAt: Date? + + public init(createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, id: String, name: String, updatedAt: Date? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.id = id + self.name = name + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case id = "id" + case name = "name" + case updatedAt = "updatedAt" + } + } + + public struct PutEnvironmentBlueprintConfigurationInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentBlueprintIdentifier", location: .uri("environmentBlueprintIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// Specifies the enabled Amazon Web Services Regions. + public let enabledRegions: [String] + /// The identifier of the environment blueprint. + public let environmentBlueprintIdentifier: String + /// The ARN of the manage access role. + public let manageAccessRoleArn: String? + /// The ARN of the provisioning role. + public let provisioningRoleArn: String? + /// The regional parameters in the environment blueprint. + public let regionalParameters: [String: [String: String]]? + + public init(domainIdentifier: String, enabledRegions: [String], environmentBlueprintIdentifier: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? 
= nil) { + self.domainIdentifier = domainIdentifier + self.enabledRegions = enabledRegions + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier + self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningRoleArn = provisioningRoleArn + self.regionalParameters = regionalParameters + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.enabledRegions.forEach { + try validate($0, name: "enabledRegions[]", parent: name, max: 16) + try validate($0, name: "enabledRegions[]", parent: name, min: 4) + try validate($0, name: "enabledRegions[]", parent: name, pattern: "^[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]$") + } + try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.manageAccessRoleArn, name: "manageAccessRoleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + try self.validate(self.provisioningRoleArn, name: "provisioningRoleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + try self.regionalParameters?.forEach { + try validate($0.key, name: "regionalParameters.key", parent: name, max: 16) + try validate($0.key, name: "regionalParameters.key", parent: name, min: 4) + try validate($0.key, name: "regionalParameters.key", parent: name, pattern: "^[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]$") + } + } + + private enum CodingKeys: String, CodingKey { + case enabledRegions = "enabledRegions" + case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningRoleArn = "provisioningRoleArn" + case regionalParameters = "regionalParameters" + } + } + + public struct PutEnvironmentBlueprintConfigurationOutput: AWSDecodableShape { + /// The timestamp of when the environment blueprint was created. + public let createdAt: Date? + /// The identifier of the Amazon DataZone domain. + public let domainId: String + /// Specifies the enabled Amazon Web Services Regions. + public let enabledRegions: [String]? + /// The identifier of the environment blueprint. + public let environmentBlueprintId: String + /// The ARN of the manage access role. + public let manageAccessRoleArn: String? + /// The ARN of the provisioning role. + public let provisioningRoleArn: String? + /// The regional parameters in the environment blueprint. + public let regionalParameters: [String: [String: String]]? + /// The timestamp of when the environment blueprint was updated. + public let updatedAt: Date? + + public init(createdAt: Date? = nil, domainId: String, enabledRegions: [String]? = nil, environmentBlueprintId: String, manageAccessRoleArn: String? = nil, provisioningRoleArn: String? = nil, regionalParameters: [String: [String: String]]? = nil, updatedAt: Date? 
= nil) { + self.createdAt = createdAt + self.domainId = domainId + self.enabledRegions = enabledRegions + self.environmentBlueprintId = environmentBlueprintId + self.manageAccessRoleArn = manageAccessRoleArn + self.provisioningRoleArn = provisioningRoleArn + self.regionalParameters = regionalParameters + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case domainId = "domainId" + case enabledRegions = "enabledRegions" + case environmentBlueprintId = "environmentBlueprintId" + case manageAccessRoleArn = "manageAccessRoleArn" + case provisioningRoleArn = "provisioningRoleArn" + case regionalParameters = "regionalParameters" + case updatedAt = "updatedAt" + } + } + + public struct RecommendationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether automatic business name generation is to be enabled or not as part of the recommendation configuration. + public let enableBusinessNameGeneration: Bool? + + public init(enableBusinessNameGeneration: Bool? = nil) { + self.enableBusinessNameGeneration = enableBusinessNameGeneration + } + + private enum CodingKeys: String, CodingKey { + case enableBusinessNameGeneration = "enableBusinessNameGeneration" + } + } + + public struct RedshiftClusterStorage: AWSEncodableShape & AWSDecodableShape { + /// The name of an Amazon Redshift cluster. + public let clusterName: String + + public init(clusterName: String) { + self.clusterName = clusterName + } + + private enum CodingKeys: String, CodingKey { + case clusterName = "clusterName" + } + } + + public struct RedshiftCredentialConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The ARN of a secret manager for an Amazon Redshift cluster. + public let secretManagerArn: String + + public init(secretManagerArn: String) { + self.secretManagerArn = secretManagerArn + } + + private enum CodingKeys: String, CodingKey { + case secretManagerArn = "secretManagerArn" + } + } + + public struct RedshiftRunConfigurationInput: AWSEncodableShape { + /// The data access role included in the configuration details of the Amazon Redshift data source. + public let dataAccessRole: String? + public let redshiftCredentialConfiguration: RedshiftCredentialConfiguration + public let redshiftStorage: RedshiftStorage + /// The relational filter configurations included in the configuration details of the Amazon Redshift data source. + public let relationalFilterConfigurations: [RelationalFilterConfiguration] + + public init(dataAccessRole: String? = nil, redshiftCredentialConfiguration: RedshiftCredentialConfiguration, redshiftStorage: RedshiftStorage, relationalFilterConfigurations: [RelationalFilterConfiguration]) { + self.dataAccessRole = dataAccessRole + self.redshiftCredentialConfiguration = redshiftCredentialConfiguration + self.redshiftStorage = redshiftStorage + self.relationalFilterConfigurations = relationalFilterConfigurations + } + + private enum CodingKeys: String, CodingKey { + case dataAccessRole = "dataAccessRole" + case redshiftCredentialConfiguration = "redshiftCredentialConfiguration" + case redshiftStorage = "redshiftStorage" + case relationalFilterConfigurations = "relationalFilterConfigurations" + } + } + + public struct RedshiftRunConfigurationOutput: AWSDecodableShape { + /// The ID of the Amazon Web Services account included in the configuration details of the Amazon Redshift data source. + public let accountId: String? + /// The data access role included in the configuration details of the Amazon Redshift data source.
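An illustrative sketch of the blueprint configuration request defined above; every identifier, Region, role ARN, and bucket value here is a placeholder, and validate(name:) applies the Region and IAM-role patterns enforced in PutEnvironmentBlueprintConfigurationInput.

import SotoDataZone

// Enable an environment blueprint in two Regions and pass per-Region provisioning parameters.
let blueprintConfig = DataZone.PutEnvironmentBlueprintConfigurationInput(
    domainIdentifier: "dzd_1234567890",
    enabledRegions: ["us-east-1", "eu-west-1"],
    environmentBlueprintIdentifier: "blueprint123",
    manageAccessRoleArn: "arn:aws:iam::111122223333:role/service-role/DataZoneManageAccess",
    provisioningRoleArn: "arn:aws:iam::111122223333:role/service-role/DataZoneProvisioning",
    regionalParameters: ["us-east-1": ["S3Location": "s3://example-datazone-bucket"]]
)
// Throws if a Region string or role ARN does not match the patterns checked in validate(name:).
try blueprintConfig.validate(name: "PutEnvironmentBlueprintConfigurationInput")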
+ public let dataAccessRole: String? + public let redshiftCredentialConfiguration: RedshiftCredentialConfiguration + public let redshiftStorage: RedshiftStorage + /// The Amazon Web Services region included in the configuration details of the Amazon Redshift data source. + public let region: String? + /// The relational filter configurations included in the configuration details of the Amazon Redshift data source. + public let relationalFilterConfigurations: [RelationalFilterConfiguration] + + public init(accountId: String? = nil, dataAccessRole: String? = nil, redshiftCredentialConfiguration: RedshiftCredentialConfiguration, redshiftStorage: RedshiftStorage, region: String? = nil, relationalFilterConfigurations: [RelationalFilterConfiguration]) { + self.accountId = accountId + self.dataAccessRole = dataAccessRole + self.redshiftCredentialConfiguration = redshiftCredentialConfiguration + self.redshiftStorage = redshiftStorage + self.region = region + self.relationalFilterConfigurations = relationalFilterConfigurations + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case dataAccessRole = "dataAccessRole" + case redshiftCredentialConfiguration = "redshiftCredentialConfiguration" + case redshiftStorage = "redshiftStorage" + case region = "region" + case relationalFilterConfigurations = "relationalFilterConfigurations" + } + } + + public struct RedshiftServerlessStorage: AWSEncodableShape & AWSDecodableShape { + /// The name of the Amazon Redshift Serverless workgroup. + public let workgroupName: String + + public init(workgroupName: String) { + self.workgroupName = workgroupName + } + + private enum CodingKeys: String, CodingKey { + case workgroupName = "workgroupName" + } + } + + public struct RejectChoice: AWSEncodableShape { + /// Specifies the automatically generated business metadata that can be rejected. + public let predictionChoices: [Int]? + /// Specifies the target (for example, a column name) where a prediction can be rejected. + public let predictionTarget: String? + + public init(predictionChoices: [Int]? = nil, predictionTarget: String? = nil) { + self.predictionChoices = predictionChoices + self.predictionTarget = predictionTarget + } + + private enum CodingKeys: String, CodingKey { + case predictionChoices = "predictionChoices" + case predictionTarget = "predictionTarget" + } + } + + public struct RejectPredictionsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")), + AWSMemberEncoding(label: "revision", location: .querystring("revision")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The identifier of the Amazon DataZone domain. + public let domainIdentifier: String + /// The identifier of the prediction. + public let identifier: String + public let rejectChoices: [RejectChoice]? + public let rejectRule: RejectRule? + public let revision: String? + + public init(clientToken: String? = RejectPredictionsInput.idempotencyToken(), domainIdentifier: String, identifier: String, rejectChoices: [RejectChoice]? = nil, rejectRule: RejectRule? = nil, revision: String?
= nil) { + self.clientToken = clientToken + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.rejectChoices = rejectChoices + self.rejectRule = rejectRule + self.revision = revision + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.revision, name: "revision", parent: name, max: 64) + try self.validate(self.revision, name: "revision", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case rejectChoices = "rejectChoices" + case rejectRule = "rejectRule" + } + } + + public struct RejectPredictionsOutput: AWSDecodableShape { + public let assetId: String + public let assetRevision: String + public let domainId: String + + public init(assetId: String, assetRevision: String, domainId: String) { + self.assetId = assetId + self.assetRevision = assetRevision + self.domainId = domainId + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case assetRevision = "assetRevision" + case domainId = "domainId" + } + } + + public struct RejectRule: AWSEncodableShape { + /// Specifies whether you want to reject the top prediction for all targets or none. + public let rule: RejectRuleBehavior? + /// The confidence score that specifies the condition at which a prediction can be rejected. + public let threshold: Float? + + public init(rule: RejectRuleBehavior? = nil, threshold: Float? = nil) { + self.rule = rule + self.threshold = threshold + } + + private enum CodingKeys: String, CodingKey { + case rule = "rule" + case threshold = "threshold" + } + } + + public struct RejectSubscriptionRequestInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The decision comment of the rejected subscription request. + public let decisionComment: String? + /// The identifier of the Amazon DataZone domain in which the subscription request was rejected. + public let domainIdentifier: String + /// The identifier of the subscription request that was rejected. + public let identifier: String + + public init(decisionComment: String? 
= nil, domainIdentifier: String, identifier: String) { + self.decisionComment = decisionComment + self.domainIdentifier = domainIdentifier + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.decisionComment, name: "decisionComment", parent: name, max: 4096) + try self.validate(self.decisionComment, name: "decisionComment", parent: name, min: 1) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case decisionComment = "decisionComment" + } + } + + public struct RejectSubscriptionRequestOutput: AWSDecodableShape { + /// The timestamp of when the subscription request was rejected. + public let createdAt: Date + /// The timestamp of when the subscription request was rejected. + public let createdBy: String + /// The decision comment of the rejected subscription request. + public let decisionComment: String? + /// The identifier of the Amazon DataZone domain in which the subscription request was rejected. + public let domainId: String + /// The identifier of the subscription request that was rejected. + public let id: String + /// The reason for the subscription request. + public let requestReason: String + /// The identifier of the subscription request reviewer. + public let reviewerId: String? + /// The status of the subscription request. + public let status: SubscriptionRequestStatus + /// The subscribed listings of the subscription request. + public let subscribedListings: [SubscribedListing] + /// The subscribed principals of the subscription request. + public let subscribedPrincipals: [SubscribedPrincipal] + /// The timestamp of when the subscription request was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription request. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, decisionComment: String? = nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.decisionComment = decisionComment + self.domainId = domainId + self.id = id + self.requestReason = requestReason + self.reviewerId = reviewerId + self.status = status + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case decisionComment = "decisionComment" + case domainId = "domainId" + case id = "id" + case requestReason = "requestReason" + case reviewerId = "reviewerId" + case status = "status" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct RelationalFilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The database name specified in the relational filter configuration for the data source. + public let databaseName: String + /// The filter expressions specified in the relational filter configuration for the data source. 
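A hypothetical use of the RejectChoice and RejectRule shapes above to turn down automatically generated business metadata; the domain, asset, revision, and column values are placeholders, and the client token falls back to the generated idempotency token.

import SotoDataZone

// Reject two prediction choices for one column; the RejectRule threshold illustrates the confidence-score condition described above.
let rejectInput = DataZone.RejectPredictionsInput(
    domainIdentifier: "dzd_1234567890",
    identifier: "asset123",
    rejectChoices: [DataZone.RejectChoice(predictionChoices: [0, 2], predictionTarget: "columnName")],
    rejectRule: DataZone.RejectRule(threshold: 0.8),
    revision: "1"
)
try rejectInput.validate(name: "RejectPredictionsInput")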
+ public let filterExpressions: [FilterExpression]? + /// The schema name specified in the relational filter configuration for the data source. + public let schemaName: String? + + public init(databaseName: String, filterExpressions: [FilterExpression]? = nil, schemaName: String? = nil) { + self.databaseName = databaseName + self.filterExpressions = filterExpressions + self.schemaName = schemaName + } + + private enum CodingKeys: String, CodingKey { + case databaseName = "databaseName" + case filterExpressions = "filterExpressions" + case schemaName = "schemaName" + } + } + + public struct Resource: AWSDecodableShape { + /// The name of a provisioned resource of this Amazon DataZone environment. + public let name: String? + /// The provider of a provisioned resource of this Amazon DataZone environment. + public let provider: String? + /// The type of a provisioned resource of this Amazon DataZone environment. + public let type: String + /// The value of a provisioned resource of this Amazon DataZone environment. + public let value: String + + public init(name: String? = nil, provider: String? = nil, type: String, value: String) { + self.name = name + self.provider = provider + self.type = type + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case provider = "provider" + case type = "type" + case value = "value" + } + } + + public struct RevokeSubscriptionInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the Amazon DataZone domain where you want to revoke a subscription. + public let domainIdentifier: String + /// The identifier of the revoked subscription. + public let identifier: String + /// Specifies whether permissions are retained when the subscription is revoked. + public let retainPermissions: Bool? + + public init(domainIdentifier: String, identifier: String, retainPermissions: Bool? = nil) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.retainPermissions = retainPermissions + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case retainPermissions = "retainPermissions" + } + } + + public struct RevokeSubscriptionOutput: AWSDecodableShape { + /// The timestamp of when the subscription was revoked. + public let createdAt: Date + /// The identifier of the user who revoked the subscription. + public let createdBy: String + /// The identifier of the Amazon DataZone domain where you want to revoke a subscription. + public let domainId: String + /// The identifier of the revoked subscription. + public let id: String + /// Specifies whether permissions are retained when the subscription is revoked. + public let retainPermissions: Bool? + /// The status of the revoked subscription. + public let status: SubscriptionStatus + /// The subscribed listing of the revoked subscription. + public let subscribedListing: SubscribedListing + /// The subscribed principal of the revoked subscription. + public let subscribedPrincipal: SubscribedPrincipal + /// The identifier of the subscription request for the revoked subscription. 
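A short sketch of revoking a subscription with the input shape above; identifiers are placeholders, and the commented client call assumes Soto's standard operation naming.

import SotoDataZone

// Revoke a subscription but keep the permissions that were already granted through it.
let revokeInput = DataZone.RevokeSubscriptionInput(
    domainIdentifier: "dzd_1234567890",
    identifier: "subscription123",
    retainPermissions: true
)
try revokeInput.validate(name: "RevokeSubscriptionInput")
// let revoked = try await dataZone.revokeSubscription(revokeInput)   // assumed generated client method
// print(revoked.status)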
+ public let subscriptionRequestId: String? + /// The timestamp of when the subscription was revoked. + public let updatedAt: Date + /// The Amazon DataZone user who revoked the subscription. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, domainId: String, id: String, retainPermissions: Bool? = nil, status: SubscriptionStatus, subscribedListing: SubscribedListing, subscribedPrincipal: SubscribedPrincipal, subscriptionRequestId: String? = nil, updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.id = id + self.retainPermissions = retainPermissions + self.status = status + self.subscribedListing = subscribedListing + self.subscribedPrincipal = subscribedPrincipal + self.subscriptionRequestId = subscriptionRequestId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case id = "id" + case retainPermissions = "retainPermissions" + case status = "status" + case subscribedListing = "subscribedListing" + case subscribedPrincipal = "subscribedPrincipal" + case subscriptionRequestId = "subscriptionRequestId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct RunStatisticsForAssets: AWSDecodableShape { + /// The added statistic for the data source run. + public let added: Int? + /// The failed statistic for the data source run. + public let failed: Int? + /// The skipped statistic for the data source run. + public let skipped: Int? + /// The unchanged statistic for the data source run. + public let unchanged: Int? + /// The updated statistic for the data source run. + public let updated: Int? + + public init(added: Int? = nil, failed: Int? = nil, skipped: Int? = nil, unchanged: Int? = nil, updated: Int? = nil) { + self.added = added + self.failed = failed + self.skipped = skipped + self.unchanged = unchanged + self.updated = updated + } + + private enum CodingKeys: String, CodingKey { + case added = "added" + case failed = "failed" + case skipped = "skipped" + case unchanged = "unchanged" + case updated = "updated" + } + } + + public struct ScheduleConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The schedule of the data source runs. + public let schedule: String? + /// The timezone of the data source run. + public let timezone: Timezone? + + public init(schedule: String? = nil, timezone: Timezone? = nil) { + self.schedule = schedule + self.timezone = timezone + } + + public func validate(name: String) throws { + try self.validate(self.schedule, name: "schedule", parent: name, max: 256) + try self.validate(self.schedule, name: "schedule", parent: name, min: 1) + try self.validate(self.schedule, name: "schedule", parent: name, pattern: "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)") + } + + private enum CodingKeys: String, CodingKey { + case schedule = "schedule" + case timezone = "timezone" + } + } + + public struct SearchGroupProfilesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain in which you want to search group profiles. + public let domainIdentifier: String + /// The group type for which to search. 
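A small example of a run schedule that satisfies the cron pattern validated in ScheduleConfiguration above; the expression is illustrative and the optional timezone member is left unset.

import SotoDataZone

// Run the data source daily at 12:00; validate(name:) checks the string against the cron pattern shown above.
let schedule = DataZone.ScheduleConfiguration(schedule: "cron(0 12 * * ? *)")
try schedule.validate(name: "ScheduleConfiguration")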
+ public let groupType: GroupSearchType + /// The maximum number of results to return in a single call to SearchGroupProfiles. When the number of results to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to SearchGroupProfiles to list the next set of results. + public let maxResults: Int? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchGroupProfiles to list the next set of results. + public let nextToken: String? + /// Specifies the text for which to search. + public let searchText: String? + + public init(domainIdentifier: String, groupType: GroupSearchType, maxResults: Int? = nil, nextToken: String? = nil, searchText: String? = nil) { + self.domainIdentifier = domainIdentifier + self.groupType = groupType + self.maxResults = maxResults + self.nextToken = nextToken + self.searchText = searchText + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.searchText, name: "searchText", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case groupType = "groupType" + case maxResults = "maxResults" + case nextToken = "nextToken" + case searchText = "searchText" + } + } + + public struct SearchGroupProfilesOutput: AWSDecodableShape { + /// The results of the SearchGroupProfiles action. + public let items: [GroupProfileSummary]? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchGroupProfiles to list the next set of results. + public let nextToken: String? + + public init(items: [GroupProfileSummary]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct SearchInItem: AWSEncodableShape { + /// The search attribute. + public let attribute: String + + public init(attribute: String) { + self.attribute = attribute + } + + public func validate(name: String) throws { + try self.validate(self.attribute, name: "attribute", parent: name, max: 128) + try self.validate(self.attribute, name: "attribute", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case attribute = "attribute" + } + } + + public struct SearchInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// Specifies additional attributes for the Search action. + public let additionalAttributes: [SearchOutputAdditionalAttribute]? + /// The identifier of the Amazon DataZone domain. 
+ public let domainIdentifier: String + /// Specifies the search filters. + public let filters: FilterClause? + /// The maximum number of results to return in a single call to Search. When the number of results to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to Search to list the next set of results. + public let maxResults: Int? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to Search to list the next set of results. + public let nextToken: String? + /// The identifier of the owning project specified for the search. + public let owningProjectIdentifier: String? + public let searchIn: [SearchInItem]? + /// The scope of the search. + public let searchScope: InventorySearchScope + /// Specifies the text for which to search. + public let searchText: String? + /// Specifies the way in which the search results are to be sorted. + public let sort: SearchSort? + + public init(additionalAttributes: [SearchOutputAdditionalAttribute]? = nil, domainIdentifier: String, filters: FilterClause? = nil, maxResults: Int? = nil, nextToken: String? = nil, owningProjectIdentifier: String? = nil, searchIn: [SearchInItem]? = nil, searchScope: InventorySearchScope, searchText: String? = nil, sort: SearchSort? = nil) { + self.additionalAttributes = additionalAttributes + self.domainIdentifier = domainIdentifier + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.owningProjectIdentifier = owningProjectIdentifier + self.searchIn = searchIn + self.searchScope = searchScope + self.searchText = searchText + self.sort = sort + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.filters?.validate(name: "\(name).filters") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.owningProjectIdentifier, name: "owningProjectIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.searchIn?.forEach { + try $0.validate(name: "\(name).searchIn[]") + } + try self.validate(self.searchIn, name: "searchIn", parent: name, max: 10) + try self.validate(self.searchIn, name: "searchIn", parent: name, min: 1) + try self.validate(self.searchText, name: "searchText", parent: name, max: 4096) + try self.validate(self.searchText, name: "searchText", parent: name, min: 1) + try self.sort?.validate(name: "\(name).sort") + } + + private enum CodingKeys: String, CodingKey { + case additionalAttributes = "additionalAttributes" + case filters = "filters" + case maxResults = "maxResults" + case nextToken = "nextToken" + case owningProjectIdentifier = "owningProjectIdentifier" + case searchIn = "searchIn" + case searchScope = "searchScope" + case searchText = "searchText" + case sort = "sort" + } + } + + public struct SearchListingsInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: 
.uri("domainIdentifier")) + ] + + /// Specifies additional attributes for the search. + public let additionalAttributes: [SearchOutputAdditionalAttribute]? + /// The identifier of the domain in which to search listings. + public let domainIdentifier: String + /// Specifies the filters for the search of listings. + public let filters: FilterClause? + /// The maximum number of results to return in a single call to SearchListings. When the number of results to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to SearchListings to list the next set of results. + public let maxResults: Int? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchListings to list the next set of results. + public let nextToken: String? + public let searchIn: [SearchInItem]? + /// Specifies the text for which to search. + public let searchText: String? + /// Specifies the way for sorting the search results. + public let sort: SearchSort? + + public init(additionalAttributes: [SearchOutputAdditionalAttribute]? = nil, domainIdentifier: String, filters: FilterClause? = nil, maxResults: Int? = nil, nextToken: String? = nil, searchIn: [SearchInItem]? = nil, searchText: String? = nil, sort: SearchSort? = nil) { + self.additionalAttributes = additionalAttributes + self.domainIdentifier = domainIdentifier + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.searchIn = searchIn + self.searchText = searchText + self.sort = sort + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.filters?.validate(name: "\(name).filters") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.searchIn?.forEach { + try $0.validate(name: "\(name).searchIn[]") + } + try self.validate(self.searchIn, name: "searchIn", parent: name, max: 10) + try self.validate(self.searchIn, name: "searchIn", parent: name, min: 1) + try self.sort?.validate(name: "\(name).sort") + } + + private enum CodingKeys: String, CodingKey { + case additionalAttributes = "additionalAttributes" + case filters = "filters" + case maxResults = "maxResults" + case nextToken = "nextToken" + case searchIn = "searchIn" + case searchText = "searchText" + case sort = "sort" + } + } + + public struct SearchListingsOutput: AWSDecodableShape { + /// The results of the SearchListings action. + public let items: [SearchResultItem]? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchListings to list the next set of results. + public let nextToken: String? + /// Total number of search results. + public let totalMatchCount: Int? 
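A hypothetical listings search assembled from the SearchListingsInput, SearchInItem, and SearchSort shapes above; the attribute name, search text, and domain identifier are placeholders rather than values taken from this diff.

import SotoDataZone

// Search published listings in a domain, restricting matching to one attribute and sorting by it.
let listingSearch = DataZone.SearchListingsInput(
    domainIdentifier: "dzd_1234567890",
    maxResults: 25,
    searchIn: [DataZone.SearchInItem(attribute: "name")],
    searchText: "customer orders",
    sort: DataZone.SearchSort(attribute: "name")
)
try listingSearch.validate(name: "SearchListingsInput")
// let results = try await dataZone.searchListings(listingSearch)   // assumed generated client method
// print(results.totalMatchCount ?? 0)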
+ + public init(items: [SearchResultItem]? = nil, nextToken: String? = nil, totalMatchCount: Int? = nil) { + self.items = items + self.nextToken = nextToken + self.totalMatchCount = totalMatchCount + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + case totalMatchCount = "totalMatchCount" + } + } + + public struct SearchOutput: AWSDecodableShape { + /// The results of the Search action. + public let items: [SearchInventoryResultItem]? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to Search to list the next set of results. + public let nextToken: String? + /// Total number of search results. + public let totalMatchCount: Int? + + public init(items: [SearchInventoryResultItem]? = nil, nextToken: String? = nil, totalMatchCount: Int? = nil) { + self.items = items + self.nextToken = nextToken + self.totalMatchCount = totalMatchCount + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + case totalMatchCount = "totalMatchCount" + } + } + + public struct SearchSort: AWSEncodableShape { + /// The attribute detail of the way to sort search results. + public let attribute: String + /// The order detail of the way to sort search results. + public let order: SortOrder? + + public init(attribute: String, order: SortOrder? = nil) { + self.attribute = attribute + self.order = order + } + + public func validate(name: String) throws { + try self.validate(self.attribute, name: "attribute", parent: name, max: 128) + try self.validate(self.attribute, name: "attribute", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case attribute = "attribute" + case order = "order" + } + } + + public struct SearchTypesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain in which to invoke the SearchTypes action. + public let domainIdentifier: String + /// The filters for the SearchTypes action. + public let filters: FilterClause? + public let managed: Bool + /// The maximum number of results to return in a single call to SearchTypes. When the number of results to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to SearchTypes to list the next set of results. + public let maxResults: Int? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchTypes to list the next set of results. + public let nextToken: String? + public let searchIn: [SearchInItem]? + /// Specifies the scope of the search for types. + public let searchScope: TypesSearchScope + /// Specifies the text for which to search. + public let searchText: String? + /// Specifies the way to sort the SearchTypes results. + public let sort: SearchSort? + + public init(domainIdentifier: String, filters: FilterClause? = nil, managed: Bool, maxResults: Int? = nil, nextToken: String?
= nil, searchIn: [SearchInItem]? = nil, searchScope: TypesSearchScope, searchText: String? = nil, sort: SearchSort? = nil) { + self.domainIdentifier = domainIdentifier + self.filters = filters + self.managed = managed + self.maxResults = maxResults + self.nextToken = nextToken + self.searchIn = searchIn + self.searchScope = searchScope + self.searchText = searchText + self.sort = sort + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.filters?.validate(name: "\(name).filters") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.searchIn?.forEach { + try $0.validate(name: "\(name).searchIn[]") + } + try self.validate(self.searchIn, name: "searchIn", parent: name, max: 10) + try self.validate(self.searchIn, name: "searchIn", parent: name, min: 1) + try self.validate(self.searchText, name: "searchText", parent: name, max: 4096) + try self.validate(self.searchText, name: "searchText", parent: name, min: 1) + try self.sort?.validate(name: "\(name).sort") + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + case managed = "managed" + case maxResults = "maxResults" + case nextToken = "nextToken" + case searchIn = "searchIn" + case searchScope = "searchScope" + case searchText = "searchText" + case sort = "sort" + } + } + + public struct SearchTypesOutput: AWSDecodableShape { + /// The results of the SearchTypes action. + public let items: [SearchTypesResultItem]? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchTypes to list the next set of results. + public let nextToken: String? + /// Total number of search results. + public let totalMatchCount: Int? + + public init(items: [SearchTypesResultItem]? = nil, nextToken: String? = nil, totalMatchCount: Int? = nil) { + self.items = items + self.nextToken = nextToken + self.totalMatchCount = totalMatchCount + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + case totalMatchCount = "totalMatchCount" + } + } + + public struct SearchUserProfilesInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain in which you want to search user profiles. + public let domainIdentifier: String + /// The maximum number of results to return in a single call to SearchUserProfiles. When the number of results to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to SearchUserProfiles to list the next set of results. + public let maxResults: Int? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. 
You can specify this NextToken value in a subsequent call to SearchUserProfiles to list the next set of results. + public let nextToken: String? + /// Specifies the text for which to search. + public let searchText: String? + /// Specifies the user type for the SearchUserProfiles action. + public let userType: UserSearchType + + public init(domainIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil, searchText: String? = nil, userType: UserSearchType) { + self.domainIdentifier = domainIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + self.searchText = searchText + self.userType = userType + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.searchText, name: "searchText", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + case searchText = "searchText" + case userType = "userType" + } + } + + public struct SearchUserProfilesOutput: AWSDecodableShape { + /// The results of the SearchUserProfiles action. + public let items: [UserProfileSummary]? + /// When the number of results is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of results, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to SearchUserProfiles to list the next set of results. + public let nextToken: String? + + public init(items: [UserProfileSummary]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + + public struct SingleSignOn: AWSEncodableShape & AWSDecodableShape { + /// The type of single sign-on in Amazon DataZone. + public let type: AuthType? + /// The single sign-on user assignment in Amazon DataZone. + public let userAssignment: UserAssignment? + + public init(type: AuthType? = nil, userAssignment: UserAssignment? = nil) { + self.type = type + self.userAssignment = userAssignment + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + case userAssignment = "userAssignment" + } + } + + public struct SsoUserProfileDetails: AWSDecodableShape { + /// The first name included in the single sign-on details of the user profile. + public let firstName: String? + /// The last name included in the single sign-on details of the user profile. + public let lastName: String? + /// The username included in the single sign-on details of the user profile. + public let username: String? + + public init(firstName: String? = nil, lastName: String? = nil, username: String? 
= nil) { + self.firstName = firstName + self.lastName = lastName + self.username = username + } + + private enum CodingKeys: String, CodingKey { + case firstName = "firstName" + case lastName = "lastName" + case username = "username" + } + } + + public struct StartDataSourceRunInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "dataSourceIdentifier", location: .uri("dataSourceIdentifier")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The identifier of the data source. + public let dataSourceIdentifier: String + /// The identifier of the Amazon DataZone domain in which to start a data source run. + public let domainIdentifier: String + + public init(clientToken: String? = StartDataSourceRunInput.idempotencyToken(), dataSourceIdentifier: String, domainIdentifier: String) { + self.clientToken = clientToken + self.dataSourceIdentifier = dataSourceIdentifier + self.domainIdentifier = domainIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.dataSourceIdentifier, name: "dataSourceIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + } + } + + public struct StartDataSourceRunOutput: AWSDecodableShape { + /// The timestamp of when the data source run was created. + @CustomCoding<ISO8601DateCoder> + public var createdAt: Date + /// The configuration snapshot of the data source that is being run. + public let dataSourceConfigurationSnapshot: String? + /// The identifier of the data source. + public let dataSourceId: String + /// The identifier of the Amazon DataZone domain in which to start a data source run. + public let domainId: String + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let errorMessage: DataSourceErrorMessage? + /// The identifier of the data source run. + public let id: String + /// The identifier of the project. + public let projectId: String + /// Specifies run statistics for assets. + public let runStatisticsForAssets: RunStatisticsForAssets? + /// The timestamp of when the data source run was started. + @OptionalCustomCoding<ISO8601DateCoder> + public var startedAt: Date? + /// The status of the data source run. + public let status: DataSourceRunStatus + /// The timestamp of when the data source run was stopped. + @OptionalCustomCoding<ISO8601DateCoder> + public var stoppedAt: Date? + /// The type of the data source run. + public let type: DataSourceRunType + /// The timestamp of when the data source run was updated. + @CustomCoding<ISO8601DateCoder> + public var updatedAt: Date + + public init(createdAt: Date, dataSourceConfigurationSnapshot: String? = nil, dataSourceId: String, domainId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, projectId: String, runStatisticsForAssets: RunStatisticsForAssets? = nil, startedAt: Date? = nil, status: DataSourceRunStatus, stoppedAt: Date? 
= nil, type: DataSourceRunType, updatedAt: Date) { + self.createdAt = createdAt + self.dataSourceConfigurationSnapshot = dataSourceConfigurationSnapshot + self.dataSourceId = dataSourceId + self.domainId = domainId + self.errorMessage = errorMessage + self.id = id + self.projectId = projectId + self.runStatisticsForAssets = runStatisticsForAssets + self.startedAt = startedAt + self.status = status + self.stoppedAt = stoppedAt + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case dataSourceConfigurationSnapshot = "dataSourceConfigurationSnapshot" + case dataSourceId = "dataSourceId" + case domainId = "domainId" + case errorMessage = "errorMessage" + case id = "id" + case projectId = "projectId" + case runStatisticsForAssets = "runStatisticsForAssets" + case startedAt = "startedAt" + case status = "status" + case stoppedAt = "stoppedAt" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct SubscribedAsset: AWSDecodableShape { + /// The identifier of the asset for which the subscription grant is created. + public let assetId: String + /// The revision of the asset for which the subscription grant is created. + public let assetRevision: String + /// The failure cause included in the details of the asset for which the subscription grant is created. + public let failureCause: FailureCause? + /// The failure timestamp included in the details of the asset for which the subscription grant is created. + public let failureTimestamp: Date? + /// The timestamp of when the subscription grant to the asset is created. + public let grantedTimestamp: Date? + /// The status of the asset for which the subscription grant is created. + public let status: SubscriptionGrantStatus + /// The target name of the asset for which the subscription grant is created. + public let targetName: String? + + public init(assetId: String, assetRevision: String, failureCause: FailureCause? = nil, failureTimestamp: Date? = nil, grantedTimestamp: Date? = nil, status: SubscriptionGrantStatus, targetName: String? = nil) { + self.assetId = assetId + self.assetRevision = assetRevision + self.failureCause = failureCause + self.failureTimestamp = failureTimestamp + self.grantedTimestamp = grantedTimestamp + self.status = status + self.targetName = targetName + } + + private enum CodingKeys: String, CodingKey { + case assetId = "assetId" + case assetRevision = "assetRevision" + case failureCause = "failureCause" + case failureTimestamp = "failureTimestamp" + case grantedTimestamp = "grantedTimestamp" + case status = "status" + case targetName = "targetName" + } + } + + public struct SubscribedAssetListing: AWSDecodableShape { + /// The identifier of the published asset for which the subscription grant is created. + public let entityId: String? + /// The revision of the published asset for which the subscription grant is created. + public let entityRevision: String? + /// The type of the published asset for which the subscription grant is created. + public let entityType: String? + /// The forms attached to the published asset for which the subscription grant is created. + public let forms: String? + /// The glossary terms attached to the published asset for which the subscription grant is created. + public let glossaryTerms: [DetailedGlossaryTerm]? + + public init(entityId: String? = nil, entityRevision: String? = nil, entityType: String? = nil, forms: String? = nil, glossaryTerms: [DetailedGlossaryTerm]? 
= nil) { + self.entityId = entityId + self.entityRevision = entityRevision + self.entityType = entityType + self.forms = forms + self.glossaryTerms = glossaryTerms + } + + private enum CodingKeys: String, CodingKey { + case entityId = "entityId" + case entityRevision = "entityRevision" + case entityType = "entityType" + case forms = "forms" + case glossaryTerms = "glossaryTerms" + } + } + + public struct SubscribedListing: AWSDecodableShape { + /// The description of the published asset for which the subscription grant is created. + public let description: String + /// The identifier of the published asset for which the subscription grant is created. + public let id: String + /// The published asset for which the subscription grant is created. + public let item: SubscribedListingItem + /// The name of the published asset for which the subscription grant is created. + public let name: String + /// The identifier of the project of the published asset for which the subscription grant is created. + public let ownerProjectId: String + /// The name of the project that owns the published asset for which the subscription grant is created. + public let ownerProjectName: String? + /// The revision of the published asset for which the subscription grant is created. + public let revision: String? + + public init(description: String, id: String, item: SubscribedListingItem, name: String, ownerProjectId: String, ownerProjectName: String? = nil, revision: String? = nil) { + self.description = description + self.id = id + self.item = item + self.name = name + self.ownerProjectId = ownerProjectId + self.ownerProjectName = ownerProjectName + self.revision = revision + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case id = "id" + case item = "item" + case name = "name" + case ownerProjectId = "ownerProjectId" + case ownerProjectName = "ownerProjectName" + case revision = "revision" + } + } + + public struct SubscribedListingInput: AWSEncodableShape { + /// The identifier of the published asset for which the subscription grant is to be created. + public let identifier: String + + public init(identifier: String) { + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case identifier = "identifier" + } + } + + public struct SubscribedProject: AWSDecodableShape { + /// The identifier of the project that has the subscription grant. + public let id: String? + /// The name of the project that has the subscription grant. + public let name: String? + + public init(id: String? = nil, name: String? = nil) { + self.id = id + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case name = "name" + } + } + + public struct SubscribedProjectInput: AWSEncodableShape { + /// The identifier of the project that is to be given a subscription grant. + public let identifier: String? + + public init(identifier: String? = nil) { + self.identifier = identifier + } + + public func validate(name: String) throws { + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case identifier = "identifier" + } + } + + public struct SubscriptionGrantSummary: AWSDecodableShape { + /// The assets included in the subscription grant. + public let assets: [SubscribedAsset]? 
+ /// The timestamp of when a subscription grant was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription grant. + public let createdBy: String + /// The identifier of the Amazon DataZone domain in which a subscription grant exists. + public let domainId: String + /// The entity to which the subscription is granted. + public let grantedEntity: GrantedEntity + /// The identifier of the subscription grant. + public let id: String + /// The status of the subscription grant. + public let status: SubscriptionGrantOverallStatus + /// The ID of the subscription grant. + public let subscriptionId: String? + /// The identifier of the target of the subscription grant. + public let subscriptionTargetId: String + /// The timestamp of when the subscription grant was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription grant. + public let updatedBy: String? + + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? = nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case assets = "assets" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case grantedEntity = "grantedEntity" + case id = "id" + case status = "status" + case subscriptionId = "subscriptionId" + case subscriptionTargetId = "subscriptionTargetId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct SubscriptionRequestSummary: AWSDecodableShape { + /// The timestamp of when a subscription request was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription request. + public let createdBy: String + /// The decision comment of the subscription request. + public let decisionComment: String? + /// The identifier of the Amazon DataZone domain in which a subscription request exists. + public let domainId: String + /// The identifier of the subscription request. + public let id: String + /// The reason for the subscription request. + public let requestReason: String + /// The identifier of the subscription request reviewer. + public let reviewerId: String? + /// The status of the subscription request. + public let status: SubscriptionRequestStatus + /// The listings included in the subscription request. + public let subscribedListings: [SubscribedListing] + /// The principals included in the subscription request. + public let subscribedPrincipals: [SubscribedPrincipal] + /// The timestamp of when the subscription request was updated. + public let updatedAt: Date + /// The identifier of the Amazon DataZone user who updated the subscription request. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, decisionComment: String? = nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String? 
= nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.decisionComment = decisionComment + self.domainId = domainId + self.id = id + self.requestReason = requestReason + self.reviewerId = reviewerId + self.status = status + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case decisionComment = "decisionComment" + case domainId = "domainId" + case id = "id" + case requestReason = "requestReason" + case reviewerId = "reviewerId" + case status = "status" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct SubscriptionSummary: AWSDecodableShape { + /// The timestamp of when the subscription was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription. + public let createdBy: String + /// The identifier of the Amazon DataZone domain in which a subscription exists. + public let domainId: String + /// The identifier of the subscription. + public let id: String + /// The retain permissions included in the subscription. + public let retainPermissions: Bool? + /// The status of the subscription. + public let status: SubscriptionStatus + /// The listing included in the subscription. + public let subscribedListing: SubscribedListing + /// The principal included in the subscription. + public let subscribedPrincipal: SubscribedPrincipal + /// The identifier of the subscription request for the subscription. + public let subscriptionRequestId: String? + /// The timestamp of when the subscription was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, domainId: String, id: String, retainPermissions: Bool? = nil, status: SubscriptionStatus, subscribedListing: SubscribedListing, subscribedPrincipal: SubscribedPrincipal, subscriptionRequestId: String? = nil, updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.id = id + self.retainPermissions = retainPermissions + self.status = status + self.subscribedListing = subscribedListing + self.subscribedPrincipal = subscribedPrincipal + self.subscriptionRequestId = subscriptionRequestId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case id = "id" + case retainPermissions = "retainPermissions" + case status = "status" + case subscribedListing = "subscribedListing" + case subscribedPrincipal = "subscribedPrincipal" + case subscriptionRequestId = "subscriptionRequestId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct SubscriptionTargetForm: AWSEncodableShape & AWSDecodableShape { + /// The content of the subscription target configuration. + public let content: String + /// The form name included in the subscription target configuration. 
+ public let formName: String + + public init(content: String, formName: String) { + self.content = content + self.formName = formName + } + + public func validate(name: String) throws { + try self.validate(self.formName, name: "formName", parent: name, max: 128) + try self.validate(self.formName, name: "formName", parent: name, min: 1) + try self.validate(self.formName, name: "formName", parent: name, pattern: "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$") + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case formName = "formName" + } + } + + public struct SubscriptionTargetSummary: AWSDecodableShape { + /// The asset types included in the subscription target. + public let applicableAssetTypes: [String] + /// The authorized principals included in the subscription target. + public let authorizedPrincipals: [String] + /// The timestamp of when the subscription target was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription target. + public let createdBy: String + /// The identifier of the Amazon DataZone domain in which the subscription target exists. + public let domainId: String + /// The identifier of the environment of the subscription target. + public let environmentId: String + /// The identifier of the subscription target. + public let id: String + /// The manage access role specified in the subscription target. + public let manageAccessRole: String + /// The name of the subscription target. + public let name: String + /// The identifier of the project specified in the subscription target. + public let projectId: String + /// The provider of the subscription target. + public let provider: String + /// The configuration of the subscription target. + public let subscriptionTargetConfig: [SubscriptionTargetForm] + /// The type of the subscription target. + public let type: String + /// The timestamp of when the subscription target was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the subscription target. + public let updatedBy: String? + + public init(applicableAssetTypes: [String], authorizedPrincipals: [String], createdAt: Date, createdBy: String, domainId: String, environmentId: String, id: String, manageAccessRole: String, name: String, projectId: String, provider: String, subscriptionTargetConfig: [SubscriptionTargetForm], type: String, updatedAt: Date? = nil, updatedBy: String? 
= nil) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.manageAccessRole = manageAccessRole + self.name = name + self.projectId = projectId + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + self.type = type + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case manageAccessRole = "manageAccessRole" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + case type = "type" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "resourceArn", location: .uri("resourceArn")) + ] + + /// The ARN of the resource to be tagged in Amazon DataZone. + public let resourceArn: String + /// Specifies the tags for the TagResource action. + public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^[\\w \\.:/=+@-]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[\\w \\.:/=+@-]*$") + } + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TermRelations: AWSEncodableShape & AWSDecodableShape { + /// The classifies of the term relations. + public let classifies: [String]? + /// The isA property of the term relations. + public let isA: [String]? + + public init(classifies: [String]? = nil, isA: [String]? = nil) { + self.classifies = classifies + self.isA = isA + } + + public func validate(name: String) throws { + try self.classifies?.forEach { + try validate($0, name: "classifies[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.classifies, name: "classifies", parent: name, max: 20) + try self.validate(self.classifies, name: "classifies", parent: name, min: 1) + try self.isA?.forEach { + try validate($0, name: "isA[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.isA, name: "isA", parent: name, max: 20) + try self.validate(self.isA, name: "isA", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case classifies = "classifies" + case isA = "isA" + } + } + + public struct Topic: AWSDecodableShape { + public let resource: NotificationResource + /// The role of the resource mentioned in a notification. + public let role: NotificationRole + /// The subject of the resource mentioned in a notification. 
+ public let subject: String + + public init(resource: NotificationResource, role: NotificationRole, subject: String) { + self.resource = resource + self.role = role + self.subject = subject + } + + private enum CodingKeys: String, CodingKey { + case resource = "resource" + case role = "role" + case subject = "subject" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "resourceArn", location: .uri("resourceArn")), + AWSMemberEncoding(label: "tagKeys", location: .querystring("tagKeys")) + ] + + /// The ARN of the resource to be untagged in Amazon DataZone. + public let resourceArn: String + /// Specifies the tag keys for the UntagResource action. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^[\\w \\.:/=+@-]+$") + } + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateDataSourceInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The asset forms to be updated as part of the UpdateDataSource action. + public let assetFormsInput: [FormInput]? + /// The configuration to be updated as part of the UpdateDataSource action. + public let configuration: DataSourceConfigurationInput? + /// The description to be updated as part of the UpdateDataSource action. + public let description: String? + /// The identifier of the domain in which to update a data source. + public let domainIdentifier: String + /// The enable setting to be updated as part of the UpdateDataSource action. + public let enableSetting: EnableSetting? + /// The identifier of the data source to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateDataSource action. + public let name: String? + /// The publish on import setting to be updated as part of the UpdateDataSource action. + public let publishOnImport: Bool? + /// The recommendation to be updated as part of the UpdateDataSource action. + public let recommendation: RecommendationConfiguration? + /// The schedule to be updated as part of the UpdateDataSource action. + public let schedule: ScheduleConfiguration? + + public init(assetFormsInput: [FormInput]? = nil, configuration: DataSourceConfigurationInput? = nil, description: String? = nil, domainIdentifier: String, enableSetting: EnableSetting? = nil, identifier: String, name: String? = nil, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? 
= nil) { + self.assetFormsInput = assetFormsInput + self.configuration = configuration + self.description = description + self.domainIdentifier = domainIdentifier + self.enableSetting = enableSetting + self.identifier = identifier + self.name = name + self.publishOnImport = publishOnImport + self.recommendation = recommendation + self.schedule = schedule + } + + public func validate(name: String) throws { + try self.assetFormsInput?.forEach { + try $0.validate(name: "\(name).assetFormsInput[]") + } + try self.validate(self.assetFormsInput, name: "assetFormsInput", parent: name, max: 10) + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.schedule?.validate(name: "\(name).schedule") + } + + private enum CodingKeys: String, CodingKey { + case assetFormsInput = "assetFormsInput" + case configuration = "configuration" + case description = "description" + case enableSetting = "enableSetting" + case name = "name" + case publishOnImport = "publishOnImport" + case recommendation = "recommendation" + case schedule = "schedule" + } + } + + public struct UpdateDataSourceOutput: AWSDecodableShape { + /// The asset forms to be updated as part of the UpdateDataSource action. + public let assetFormsOutput: [FormOutput]? + /// The configuration to be updated as part of the UpdateDataSource action. + public let configuration: DataSourceConfigurationOutput? + /// The timestamp of when the data source was created. + @OptionalCustomCoding<ISO8601DateCoder> + public var createdAt: Date? + /// The description to be updated as part of the UpdateDataSource action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which a data source is to be updated. + public let domainId: String + /// The enable setting to be updated as part of the UpdateDataSource action. + public let enableSetting: EnableSetting? + /// The identifier of the environment in which a data source is to be updated. + public let environmentId: String + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let errorMessage: DataSourceErrorMessage? + /// The identifier of the data source to be updated. + public let id: String + /// The timestamp of when the data source was last run. + @OptionalCustomCoding<ISO8601DateCoder> + public var lastRunAt: Date? + /// The last run error message of the data source. + public let lastRunErrorMessage: DataSourceErrorMessage? + /// The last run status of the data source. + public let lastRunStatus: DataSourceRunStatus? + /// The name to be updated as part of the UpdateDataSource action. + public let name: String + /// The identifier of the project where the data source is to be updated. + public let projectId: String + /// The publish on import setting to be updated as part of the UpdateDataSource action. + public let publishOnImport: Bool? + /// The recommendation to be updated as part of the UpdateDataSource action. + public let recommendation: RecommendationConfiguration? + /// The schedule to be updated as part of the UpdateDataSource action. + public let schedule: ScheduleConfiguration? + /// The status to be updated as part of the UpdateDataSource action. 
+ public let status: DataSourceStatus? + /// The type to be updated as part of the UpdateDataSource action. + public let type: String? + /// The timestamp of when the data source was updated. + @OptionalCustomCoding<ISO8601DateCoder> + public var updatedAt: Date? + + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { + self.assetFormsOutput = assetFormsOutput + self.configuration = configuration + self.createdAt = createdAt + self.description = description + self.domainId = domainId + self.enableSetting = enableSetting + self.environmentId = environmentId + self.errorMessage = errorMessage + self.id = id + self.lastRunAt = lastRunAt + self.lastRunErrorMessage = lastRunErrorMessage + self.lastRunStatus = lastRunStatus + self.name = name + self.projectId = projectId + self.publishOnImport = publishOnImport + self.recommendation = recommendation + self.schedule = schedule + self.status = status + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case assetFormsOutput = "assetFormsOutput" + case configuration = "configuration" + case createdAt = "createdAt" + case description = "description" + case domainId = "domainId" + case enableSetting = "enableSetting" + case environmentId = "environmentId" + case errorMessage = "errorMessage" + case id = "id" + case lastRunAt = "lastRunAt" + case lastRunErrorMessage = "lastRunErrorMessage" + case lastRunStatus = "lastRunStatus" + case name = "name" + case projectId = "projectId" + case publishOnImport = "publishOnImport" + case recommendation = "recommendation" + case schedule = "schedule" + case status = "status" + case type = "type" + case updatedAt = "updatedAt" + } + } + + public struct UpdateDomainInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "clientToken", location: .querystring("clientToken")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The description to be updated as part of the UpdateDomain action. + public let description: String? + /// The domain execution role to be updated as part of the UpdateDomain action. + public let domainExecutionRole: String? + /// The ID of the Amazon DataZone domain that is to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateDomain action. + public let name: String? + /// The single sign-on option to be updated as part of the UpdateDomain action. + public let singleSignOn: SingleSignOn? + + public init(clientToken: String? = UpdateDomainInput.idempotencyToken(), description: String? = nil, domainExecutionRole: String? = nil, identifier: String, name: String? = nil, singleSignOn: SingleSignOn? 
= nil) { + self.clientToken = clientToken + self.description = description + self.domainExecutionRole = domainExecutionRole + self.identifier = identifier + self.name = name + self.singleSignOn = singleSignOn + } + + public func validate(name: String) throws { + try self.validate(self.domainExecutionRole, name: "domainExecutionRole", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainExecutionRole = "domainExecutionRole" + case name = "name" + case singleSignOn = "singleSignOn" + } + } + + public struct UpdateDomainOutput: AWSDecodableShape { + /// The description to be updated as part of the UpdateDomain action. + public let description: String? + /// The domain execution role to be updated as part of the UpdateDomain action. + public let domainExecutionRole: String? + /// The identifier of the Amazon DataZone domain. + public let id: String + /// Specifies the timestamp of when the domain was last updated. + public let lastUpdatedAt: Date? + /// The name to be updated as part of the UpdateDomain action. + public let name: String? + /// The single sign-on option of the Amazon DataZone domain. + public let singleSignOn: SingleSignOn? + + public init(description: String? = nil, domainExecutionRole: String? = nil, id: String, lastUpdatedAt: Date? = nil, name: String? = nil, singleSignOn: SingleSignOn? = nil) { + self.description = description + self.domainExecutionRole = domainExecutionRole + self.id = id + self.lastUpdatedAt = lastUpdatedAt + self.name = name + self.singleSignOn = singleSignOn + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainExecutionRole = "domainExecutionRole" + case id = "id" + case lastUpdatedAt = "lastUpdatedAt" + case name = "name" + case singleSignOn = "singleSignOn" + } + } + + public struct UpdateEnvironmentInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The description to be updated as part of the UpdateEnvironment action. + public let description: String? + /// The identifier of the domain in which the environment is to be updated. + public let domainIdentifier: String + /// The glossary terms to be updated as part of the UpdateEnvironment action. + public let glossaryTerms: [String]? + /// The identifier of the environment that is to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateEnvironment action. + public let name: String? + + public init(description: String? = nil, domainIdentifier: String, glossaryTerms: [String]? = nil, identifier: String, name: String? 
= nil) { + self.description = description + self.domainIdentifier = domainIdentifier + self.glossaryTerms = glossaryTerms + self.identifier = identifier + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case glossaryTerms = "glossaryTerms" + case name = "name" + } + } + + public struct UpdateEnvironmentOutput: AWSDecodableShape { + /// The identifier of the Amazon Web Services account in which the environment is to be updated. + public let awsAccountId: String? + /// The Amazon Web Services Region in which the environment is updated. + public let awsAccountRegion: String? + /// The timestamp of when the environment was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the environment. + public let createdBy: String + /// The deployment properties to be updated as part of the UpdateEnvironment action. + public let deploymentProperties: DeploymentProperties? + /// The description to be updated as part of the UpdateEnvironment action. + public let description: String? + /// The identifier of the domain in which the environment is to be updated. + public let domainId: String + /// The environment actions to be updated as part of the UpdateEnvironment action. + public let environmentActions: [ConfigurableEnvironmentAction]? + /// The blueprint identifier of the environment. + public let environmentBlueprintId: String? + /// The profile identifier of the environment. + public let environmentProfileId: String + /// The glossary terms to be updated as part of the UpdateEnvironment action. + public let glossaryTerms: [String]? + /// The identifier of the environment that is to be updated. + public let id: String? + /// The last deployment of the environment. + public let lastDeployment: Deployment? + /// The name to be updated as part of the UpdateEnvironment action. + public let name: String + /// The project identifier of the environment. + public let projectId: String + /// The provider identifier of the environment. + public let provider: String + /// The provisioned resources to be updated as part of the UpdateEnvironment action. + public let provisionedResources: [Resource]? + /// The provisioning properties to be updated as part of the UpdateEnvironment action. + public let provisioningProperties: ProvisioningProperties? + /// The status to be updated as part of the UpdateEnvironment action. + public let status: EnvironmentStatus? + /// The timestamp of when the environment was updated. + public let updatedAt: Date? + /// The user parameters to be updated as part of the UpdateEnvironment action. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? 
= nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.deploymentProperties = deploymentProperties + self.description = description + self.domainId = domainId + self.environmentActions = environmentActions + self.environmentBlueprintId = environmentBlueprintId + self.environmentProfileId = environmentProfileId + self.glossaryTerms = glossaryTerms + self.id = id + self.lastDeployment = lastDeployment + self.name = name + self.projectId = projectId + self.provider = provider + self.provisionedResources = provisionedResources + self.provisioningProperties = provisioningProperties + self.status = status + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case deploymentProperties = "deploymentProperties" + case description = "description" + case domainId = "domainId" + case environmentActions = "environmentActions" + case environmentBlueprintId = "environmentBlueprintId" + case environmentProfileId = "environmentProfileId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case lastDeployment = "lastDeployment" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case provisionedResources = "provisionedResources" + case provisioningProperties = "provisioningProperties" + case status = "status" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct UpdateEnvironmentProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The Amazon Web Services account in which a specified environment profile is to be updated. + public let awsAccountId: String? + /// The Amazon Web Services Region in which a specified environment profile is to be updated. + public let awsAccountRegion: String? + /// The description to be updated as part of the UpdateEnvironmentProfile action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which an environment profile is to be updated. + public let domainIdentifier: String + /// The identifier of the environment profile that is to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateEnvironmentProfile action. + public let name: String? + /// The user parameters to be updated as part of the UpdateEnvironmentProfile action. + public let userParameters: [EnvironmentParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, description: String? = nil, domainIdentifier: String, identifier: String, name: String? = nil, userParameters: [EnvironmentParameter]? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.description = description + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.name = name + self.userParameters = userParameters + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case description = "description" + case name = "name" + case userParameters = "userParameters" + } + } + + public struct UpdateEnvironmentProfileOutput: AWSDecodableShape { + /// The Amazon Web Services account in which a specified environment profile is to be updated. + public let awsAccountId: String? + /// The Amazon Web Services Region in which a specified environment profile is to be updated. + public let awsAccountRegion: String? + /// The timestamp of when the environment profile was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the environment profile. + public let createdBy: String + /// The description to be updated as part of the UpdateEnvironmentProfile action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which the environment profile is to be updated. + public let domainId: String + /// The identifier of the blueprint of the environment profile that is to be updated. + public let environmentBlueprintId: String + /// The identifier of the environment profile that is to be updated. + public let id: String + /// The name to be updated as part of the UpdateEnvironmentProfile action. + public let name: String + /// The identifier of the project of the environment profile that is to be updated. + public let projectId: String? + /// The timestamp of when the environment profile was updated. + public let updatedAt: Date? + /// The user parameters to be updated as part of the UpdateEnvironmentProfile action. + public let userParameters: [CustomParameter]? + + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentBlueprintId: String, id: String, name: String, projectId: String? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? 
= nil) { + self.awsAccountId = awsAccountId + self.awsAccountRegion = awsAccountRegion + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.environmentBlueprintId = environmentBlueprintId + self.id = id + self.name = name + self.projectId = projectId + self.updatedAt = updatedAt + self.userParameters = userParameters + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case awsAccountRegion = "awsAccountRegion" + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case environmentBlueprintId = "environmentBlueprintId" + case id = "id" + case name = "name" + case projectId = "projectId" + case updatedAt = "updatedAt" + case userParameters = "userParameters" + } + } + + public struct UpdateGlossaryInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// A unique, case-sensitive identifier that is provided to ensure the idempotency of the request. + public let clientToken: String? + /// The description to be updated as part of the UpdateGlossary action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which a business glossary is to be updated. + public let domainIdentifier: String + /// The identifier of the business glossary to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateGlossary action. + public let name: String? + /// The status to be updated as part of the UpdateGlossary action. + public let status: GlossaryStatus? + + public init(clientToken: String? = UpdateGlossaryInput.idempotencyToken(), description: String? = nil, domainIdentifier: String, identifier: String, name: String? = nil, status: GlossaryStatus? = nil) { + self.clientToken = clientToken + self.description = description + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.name = name + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x21-\\x7E]+$") + try self.validate(self.description, name: "description", parent: name, max: 4096) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case name = "name" + case status = "status" + } + } + + public struct UpdateGlossaryOutput: AWSDecodableShape { + /// The description to be updated as part of the UpdateGlossary action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which a business glossary is to be updated. + public let domainId: String + /// The identifier of the business glossary that is to be updated. 
+ public let id: String + /// The name to be updated as part of the UpdateGlossary action. + public let name: String + /// The identifier of the project in which to update a business glossary. + public let owningProjectId: String + /// The status to be updated as part of the UpdateGlossary action. + public let status: GlossaryStatus? + + public init(description: String? = nil, domainId: String, id: String, name: String, owningProjectId: String, status: GlossaryStatus? = nil) { + self.description = description + self.domainId = domainId + self.id = id + self.name = name + self.owningProjectId = owningProjectId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case id = "id" + case name = "name" + case owningProjectId = "owningProjectId" + case status = "status" + } + } + + public struct UpdateGlossaryTermInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the Amazon DataZone domain in which a business glossary term is to be updated. + public let domainIdentifier: String + /// The identifier of the business glossary in which a term is to be updated. + public let glossaryIdentifier: String? + /// The identifier of the business glossary term that is to be updated. + public let identifier: String + /// The long description to be updated as part of the UpdateGlossaryTerm action. + public let longDescription: String? + /// The name to be updated as part of the UpdateGlossaryTerm action. + public let name: String? + /// The short description to be updated as part of the UpdateGlossaryTerm action. + public let shortDescription: String? + /// The status to be updated as part of the UpdateGlossaryTerm action. + public let status: GlossaryTermStatus? + /// The term relations to be updated as part of the UpdateGlossaryTerm action. + public let termRelations: TermRelations? + + public init(domainIdentifier: String, glossaryIdentifier: String? = nil, identifier: String, longDescription: String? = nil, name: String? = nil, shortDescription: String? = nil, status: GlossaryTermStatus? = nil, termRelations: TermRelations? 
= nil) { + self.domainIdentifier = domainIdentifier + self.glossaryIdentifier = glossaryIdentifier + self.identifier = identifier + self.longDescription = longDescription + self.name = name + self.shortDescription = shortDescription + self.status = status + self.termRelations = termRelations + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.glossaryIdentifier, name: "glossaryIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.longDescription, name: "longDescription", parent: name, max: 4096) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.shortDescription, name: "shortDescription", parent: name, max: 1024) + try self.termRelations?.validate(name: "\(name).termRelations") + } + + private enum CodingKeys: String, CodingKey { + case glossaryIdentifier = "glossaryIdentifier" + case longDescription = "longDescription" + case name = "name" + case shortDescription = "shortDescription" + case status = "status" + case termRelations = "termRelations" + } + } + + public struct UpdateGlossaryTermOutput: AWSDecodableShape { + /// The identifier of the Amazon DataZone domain in which a business glossary term is to be updated. + public let domainId: String + /// The identifier of the business glossary in which a term is to be updated. + public let glossaryId: String + /// The identifier of the business glossary term that is to be updated. + public let id: String + /// The long description to be updated as part of the UpdateGlossaryTerm action. + public let longDescription: String? + /// The name to be updated as part of the UpdateGlossaryTerm action. + public let name: String + /// The short description to be updated as part of the UpdateGlossaryTerm action. + public let shortDescription: String? + /// The status to be updated as part of the UpdateGlossaryTerm action. + public let status: GlossaryTermStatus + /// The term relations to be updated as part of the UpdateGlossaryTerm action. + public let termRelations: TermRelations? + + public init(domainId: String, glossaryId: String, id: String, longDescription: String? = nil, name: String, shortDescription: String? = nil, status: GlossaryTermStatus, termRelations: TermRelations? = nil) { + self.domainId = domainId + self.glossaryId = glossaryId + self.id = id + self.longDescription = longDescription + self.name = name + self.shortDescription = shortDescription + self.status = status + self.termRelations = termRelations + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case glossaryId = "glossaryId" + case id = "id" + case longDescription = "longDescription" + case name = "name" + case shortDescription = "shortDescription" + case status = "status" + case termRelations = "termRelations" + } + } + + public struct UpdateGroupProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "groupIdentifier", location: .uri("groupIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain in which a group profile is updated. 
+ public let domainIdentifier: String + /// The identifier of the group profile that is updated. + public let groupIdentifier: String + /// The status of the group profile that is updated. + public let status: GroupProfileStatus + + public init(domainIdentifier: String, groupIdentifier: String, status: GroupProfileStatus) { + self.domainIdentifier = domainIdentifier + self.groupIdentifier = groupIdentifier + self.status = status + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.groupIdentifier, name: "groupIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]+)") + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + + public struct UpdateGroupProfileOutput: AWSDecodableShape { + /// The identifier of the Amazon DataZone domain in which a group profile is updated. + public let domainId: String? + /// The name of the group profile that is updated. + public let groupName: String? + /// The identifier of the group profile that is updated. + public let id: String? + /// The status of the group profile that is updated. + public let status: GroupProfileStatus? + + public init(domainId: String? = nil, groupName: String? = nil, id: String? = nil, status: GroupProfileStatus? = nil) { + self.domainId = domainId + self.groupName = groupName + self.id = id + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case domainId = "domainId" + case groupName = "groupName" + case id = "id" + case status = "status" + } + } + + public struct UpdateProjectInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The description to be updated as part of the UpdateProject action. + public let description: String? + /// The identifier of the Amazon DataZone domain in which a project is to be updated. + public let domainIdentifier: String + /// The glossary terms to be updated as part of the UpdateProject action. + public let glossaryTerms: [String]? + /// The identifier of the project that is to be updated. + public let identifier: String + /// The name to be updated as part of the UpdateProject action. + public let name: String? + + public init(description: String? = nil, domainIdentifier: String, glossaryTerms: [String]? = nil, identifier: String, name: String? 
= nil) { + self.description = description + self.domainIdentifier = domainIdentifier + self.glossaryTerms = glossaryTerms + self.identifier = identifier + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.glossaryTerms?.forEach { + try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, max: 20) + try self.validate(self.glossaryTerms, name: "glossaryTerms", parent: name, min: 1) + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case glossaryTerms = "glossaryTerms" + case name = "name" + } + } + + public struct UpdateProjectOutput: AWSDecodableShape { + /// The timestamp of when the project was created. + public let createdAt: Date? + /// The Amazon DataZone user who created the project. + public let createdBy: String + /// The description of the project that is to be updated. + public let description: String? + /// The identifier of the Amazon DataZone domain in which a project is updated. + public let domainId: String + /// The glossary terms of the project that are to be updated. + public let glossaryTerms: [String]? + /// The identifier of the project that is to be updated. + public let id: String + /// The timestamp of when the project was last updated. + public let lastUpdatedAt: Date? + /// The name of the project that is to be updated. + public let name: String + + public init(createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, glossaryTerms: [String]? = nil, id: String, lastUpdatedAt: Date? = nil, name: String) { + self.createdAt = createdAt + self.createdBy = createdBy + self.description = description + self.domainId = domainId + self.glossaryTerms = glossaryTerms + self.id = id + self.lastUpdatedAt = lastUpdatedAt + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case description = "description" + case domainId = "domainId" + case glossaryTerms = "glossaryTerms" + case id = "id" + case lastUpdatedAt = "lastUpdatedAt" + case name = "name" + } + } + + public struct UpdateSubscriptionGrantStatusInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "assetIdentifier", location: .uri("assetIdentifier")), + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the asset the subscription grant status of which is to be updated. + public let assetIdentifier: String + /// The identifier of the Amazon DataZone domain in which a subscription grant status is to be updated. + public let domainIdentifier: String + /// Specifies the error message that is returned if the operation cannot be successfully completed. + public let failureCause: FailureCause? 
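// Editorial sketch (not part of the generated source): the UpdateProject shapes above are normally driven through Soto's generated client. The `DataZone` service struct, its `updateProject(_:)` method, and the `dataZone`/`awsClient` values below are assumptions based on the conventions visible elsewhere in this diff (compare the EC2 client methods further down), not on code shown here.
//
//     let dataZone = DataZone(client: awsClient, region: .useast1)   // client construction assumed from Soto conventions
//     let input = DataZone.UpdateProjectInput(
//         description: "Updated project description",   // optional, at most 2048 characters
//         domainIdentifier: "dzd_example123",            // must match ^dzd[-_][a-zA-Z0-9_-]{1,36}$
//         identifier: "abc123projectid",                 // must match ^[a-zA-Z0-9_-]{1,36}$
//         name: "analytics-project"                      // optional, 1...64 characters, ^[\w -]+$
//     )
//     let output = try await dataZone.updateProject(input)
//     print(output.name)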
+ /// The identifier of the subscription grant the status of which is to be updated. + public let identifier: String + /// The status to be updated as part of the UpdateSubscriptionGrantStatus action. + public let status: SubscriptionGrantStatus + /// The target name to be updated as part of the UpdateSubscriptionGrantStatus action. + public let targetName: String? + + public init(assetIdentifier: String, domainIdentifier: String, failureCause: FailureCause? = nil, identifier: String, status: SubscriptionGrantStatus, targetName: String? = nil) { + self.assetIdentifier = assetIdentifier + self.domainIdentifier = domainIdentifier + self.failureCause = failureCause + self.identifier = identifier + self.status = status + self.targetName = targetName + } + + public func validate(name: String) throws { + try self.validate(self.assetIdentifier, name: "assetIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case failureCause = "failureCause" + case status = "status" + case targetName = "targetName" + } + } + + public struct UpdateSubscriptionGrantStatusOutput: AWSDecodableShape { + public let assets: [SubscribedAsset]? + /// The timestamp of when the subscription grant status was created. + public let createdAt: Date + /// The Amazon DataZone domain user who created the subscription grant status. + public let createdBy: String + /// The identifier of the Amazon DataZone domain in which a subscription grant status is to be updated. + public let domainId: String + /// The granted entity to be updated as part of the UpdateSubscriptionGrantStatus action. + public let grantedEntity: GrantedEntity + /// The identifier of the subscription grant. + public let id: String + /// The status to be updated as part of the UpdateSubscriptionGrantStatus action. + public let status: SubscriptionGrantOverallStatus + /// The identifier of the subscription. + public let subscriptionId: String? + /// The identifier of the subscription target whose subscription grant status is to be updated. + public let subscriptionTargetId: String + /// The timestamp of when the subscription grant status is to be updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription grant status. + public let updatedBy: String? + + public init(assets: [SubscribedAsset]? = nil, createdAt: Date, createdBy: String, domainId: String, grantedEntity: GrantedEntity, id: String, status: SubscriptionGrantOverallStatus, subscriptionId: String? = nil, subscriptionTargetId: String, updatedAt: Date, updatedBy: String? 
= nil) { + self.assets = assets + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.grantedEntity = grantedEntity + self.id = id + self.status = status + self.subscriptionId = subscriptionId + self.subscriptionTargetId = subscriptionTargetId + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case assets = "assets" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case grantedEntity = "grantedEntity" + case id = "id" + case status = "status" + case subscriptionId = "subscriptionId" + case subscriptionTargetId = "subscriptionTargetId" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct UpdateSubscriptionRequestInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The identifier of the Amazon DataZone domain in which a subscription request is to be updated. + public let domainIdentifier: String + /// The identifier of the subscription request that is to be updated. + public let identifier: String + /// The reason for the UpdateSubscriptionRequest action. + public let requestReason: String + + public init(domainIdentifier: String, identifier: String, requestReason: String) { + self.domainIdentifier = domainIdentifier + self.identifier = identifier + self.requestReason = requestReason + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.requestReason, name: "requestReason", parent: name, max: 4096) + try self.validate(self.requestReason, name: "requestReason", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case requestReason = "requestReason" + } + } + + public struct UpdateSubscriptionRequestOutput: AWSDecodableShape { + /// The timestamp of when the subscription request was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription request. + public let createdBy: String + /// The decision comment of the UpdateSubscriptionRequest action. + public let decisionComment: String? + /// The identifier of the Amazon DataZone domain in which a subscription request is to be updated. + public let domainId: String + /// The identifier of the subscription request that is to be updated. + public let id: String + /// The reason for the UpdateSubscriptionRequest action. + public let requestReason: String + /// The identifier of the Amazon DataZone user who reviews the subscription request. + public let reviewerId: String? + /// The status of the subscription request. + public let status: SubscriptionRequestStatus + /// The subscribed listings of the subscription request. + public let subscribedListings: [SubscribedListing] + /// The subscribed principals of the subscription request. + public let subscribedPrincipals: [SubscribedPrincipal] + /// The timestamp of when the subscription request was updated. + public let updatedAt: Date + /// The Amazon DataZone user who updated the subscription request. + public let updatedBy: String? + + public init(createdAt: Date, createdBy: String, decisionComment: String? 
= nil, domainId: String, id: String, requestReason: String, reviewerId: String? = nil, status: SubscriptionRequestStatus, subscribedListings: [SubscribedListing], subscribedPrincipals: [SubscribedPrincipal], updatedAt: Date, updatedBy: String? = nil) { + self.createdAt = createdAt + self.createdBy = createdBy + self.decisionComment = decisionComment + self.domainId = domainId + self.id = id + self.requestReason = requestReason + self.reviewerId = reviewerId + self.status = status + self.subscribedListings = subscribedListings + self.subscribedPrincipals = subscribedPrincipals + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case createdBy = "createdBy" + case decisionComment = "decisionComment" + case domainId = "domainId" + case id = "id" + case requestReason = "requestReason" + case reviewerId = "reviewerId" + case status = "status" + case subscribedListings = "subscribedListings" + case subscribedPrincipals = "subscribedPrincipals" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct UpdateSubscriptionTargetInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "environmentIdentifier", location: .uri("environmentIdentifier")), + AWSMemberEncoding(label: "identifier", location: .uri("identifier")) + ] + + /// The applicable asset types to be updated as part of the UpdateSubscriptionTarget action. + public let applicableAssetTypes: [String]? + /// The authorized principals to be updated as part of the UpdateSubscriptionTarget action. + public let authorizedPrincipals: [String]? + /// The identifier of the Amazon DataZone domain in which a subscription target is to be updated. + public let domainIdentifier: String + /// The identifier of the environment in which a subscription target is to be updated. + public let environmentIdentifier: String + /// Identifier of the subscription target that is to be updated. + public let identifier: String + /// The manage access role to be updated as part of the UpdateSubscriptionTarget action. + public let manageAccessRole: String? + /// The name to be updated as part of the UpdateSubscriptionTarget action. + public let name: String? + /// The provider to be updated as part of the UpdateSubscriptionTarget action. + public let provider: String? + /// The configuration to be updated as part of the UpdateSubscriptionTarget action. + public let subscriptionTargetConfig: [SubscriptionTargetForm]? + + public init(applicableAssetTypes: [String]? = nil, authorizedPrincipals: [String]? = nil, domainIdentifier: String, environmentIdentifier: String, identifier: String, manageAccessRole: String? = nil, name: String? = nil, provider: String? = nil, subscriptionTargetConfig: [SubscriptionTargetForm]? 
= nil) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.identifier = identifier + self.manageAccessRole = manageAccessRole + self.name = name + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + } + + public func validate(name: String) throws { + try self.applicableAssetTypes?.forEach { + try validate($0, name: "applicableAssetTypes[]", parent: name, max: 256) + try validate($0, name: "applicableAssetTypes[]", parent: name, min: 1) + try validate($0, name: "applicableAssetTypes[]", parent: name, pattern: "^[^\\.]*") + } + try self.authorizedPrincipals?.forEach { + try validate($0, name: "authorizedPrincipals[]", parent: name, pattern: "^[a-zA-Z0-9:/_-]*$") + } + try self.validate(self.authorizedPrincipals, name: "authorizedPrincipals", parent: name, max: 10) + try self.validate(self.authorizedPrincipals, name: "authorizedPrincipals", parent: name, min: 1) + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.subscriptionTargetConfig?.forEach { + try $0.validate(name: "\(name).subscriptionTargetConfig[]") + } + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case manageAccessRole = "manageAccessRole" + case name = "name" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + } + } + + public struct UpdateSubscriptionTargetOutput: AWSDecodableShape { + /// The applicable asset types to be updated as part of the UpdateSubscriptionTarget action. + public let applicableAssetTypes: [String] + /// The authorized principals to be updated as part of the UpdateSubscriptionTarget action. + public let authorizedPrincipals: [String] + /// The timestamp of when a subscription target was created. + public let createdAt: Date + /// The Amazon DataZone user who created the subscription target. + public let createdBy: String + /// The identifier of the Amazon DataZone domain in which a subscription target is to be updated. + public let domainId: String + /// The identifier of the environment in which a subscription target is to be updated. + public let environmentId: String + /// Identifier of the subscription target that is to be updated. + public let id: String + /// The manage access role to be updated as part of the UpdateSubscriptionTarget action. + public let manageAccessRole: String + /// The name to be updated as part of the UpdateSubscriptionTarget action. + public let name: String + /// The identifier of the project in which a subscription target is to be updated. + public let projectId: String + /// The provider to be updated as part of the UpdateSubscriptionTarget action. + public let provider: String + /// The configuration to be updated as part of the UpdateSubscriptionTarget action. 
+ public let subscriptionTargetConfig: [SubscriptionTargetForm] + /// The type to be updated as part of the UpdateSubscriptionTarget action. + public let type: String + /// The timestamp of when the subscription target was updated. + public let updatedAt: Date? + /// The Amazon DataZone user who updated the subscription target. + public let updatedBy: String? + + public init(applicableAssetTypes: [String], authorizedPrincipals: [String], createdAt: Date, createdBy: String, domainId: String, environmentId: String, id: String, manageAccessRole: String, name: String, projectId: String, provider: String, subscriptionTargetConfig: [SubscriptionTargetForm], type: String, updatedAt: Date? = nil, updatedBy: String? = nil) { + self.applicableAssetTypes = applicableAssetTypes + self.authorizedPrincipals = authorizedPrincipals + self.createdAt = createdAt + self.createdBy = createdBy + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.manageAccessRole = manageAccessRole + self.name = name + self.projectId = projectId + self.provider = provider + self.subscriptionTargetConfig = subscriptionTargetConfig + self.type = type + self.updatedAt = updatedAt + self.updatedBy = updatedBy + } + + private enum CodingKeys: String, CodingKey { + case applicableAssetTypes = "applicableAssetTypes" + case authorizedPrincipals = "authorizedPrincipals" + case createdAt = "createdAt" + case createdBy = "createdBy" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case manageAccessRole = "manageAccessRole" + case name = "name" + case projectId = "projectId" + case provider = "provider" + case subscriptionTargetConfig = "subscriptionTargetConfig" + case type = "type" + case updatedAt = "updatedAt" + case updatedBy = "updatedBy" + } + } + + public struct UpdateUserProfileInput: AWSEncodableShape { + public static var _encoding = [ + AWSMemberEncoding(label: "domainIdentifier", location: .uri("domainIdentifier")), + AWSMemberEncoding(label: "userIdentifier", location: .uri("userIdentifier")) + ] + + /// The identifier of the Amazon DataZone domain in which a user profile is updated. + public let domainIdentifier: String + /// The status of the user profile that is to be updated. + public let status: UserProfileStatus + /// The type of the user profile that is to be updated. + public let type: UserProfileType? + /// The identifier of the user whose user profile is to be updated. + public let userIdentifier: String + + public init(domainIdentifier: String, status: UserProfileStatus, type: UserProfileType? = nil, userIdentifier: String) { + self.domainIdentifier = domainIdentifier + self.status = status + self.type = type + self.userIdentifier = userIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.userIdentifier, name: "userIdentifier", parent: name, pattern: "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[a-zA-Z_0-9+=,.@-]+$|^arn:aws:iam::\\d{12}:.+$)") + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + case type = "type" + } + } + + public struct UpdateUserProfileOutput: AWSDecodableShape { + public let details: UserProfileDetails? + /// The identifier of the Amazon DataZone domain in which a user profile is updated. + public let domainId: String? + /// The identifier of the user profile. + public let id: String?
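// Editorial sketch (not part of the generated source): the UpdateUserProfileInput defined above can be validated client-side before use. The `.deactivated` case, the `updateUserProfile(_:)` method, and the configured `dataZone` client are assumptions drawn from the DataZone service model and Soto's generated-client conventions rather than from this diff.
//
//     let input = DataZone.UpdateUserProfileInput(
//         domainIdentifier: "dzd_example123",                        // ^dzd[-_][a-zA-Z0-9_-]{1,36}$
//         status: .deactivated,                                      // case name assumed
//         userIdentifier: "arn:aws:iam::123456789012:user/example"   // UUID, user name, or IAM ARN
//     )
//     try input.validate(name: "UpdateUserProfileInput")             // applies the patterns shown above
//     let output = try await dataZone.updateUserProfile(input)
//     print(output.id ?? "unknown profile")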
+ /// The status of the user profile. + public let status: UserProfileStatus? + /// The type of the user profile. + public let type: UserProfileType? + + public init(details: UserProfileDetails? = nil, domainId: String? = nil, id: String? = nil, status: UserProfileStatus? = nil, type: UserProfileType? = nil) { + self.details = details + self.domainId = domainId + self.id = id + self.status = status + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case details = "details" + case domainId = "domainId" + case id = "id" + case status = "status" + case type = "type" + } + } + + public struct UserDetails: AWSDecodableShape { + /// The identifier of the Amazon DataZone user. + public let userId: String + + public init(userId: String) { + self.userId = userId + } + + private enum CodingKeys: String, CodingKey { + case userId = "userId" + } + } + + public struct UserProfileSummary: AWSDecodableShape { + /// The details of the user profile. + public let details: UserProfileDetails? + /// The ID of the Amazon DataZone domain of the user profile. + public let domainId: String? + /// The ID of the user profile. + public let id: String? + /// The status of the user profile. + public let status: UserProfileStatus? + /// The type of the user profile. + public let type: UserProfileType? + + public init(details: UserProfileDetails? = nil, domainId: String? = nil, id: String? = nil, status: UserProfileStatus? = nil, type: UserProfileType? = nil) { + self.details = details + self.domainId = domainId + self.id = id + self.status = status + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case details = "details" + case domainId = "domainId" + case id = "id" + case status = "status" + case type = "type" + } + } + + public struct GrantedEntity: AWSDecodableShape { + /// The listing for which a subscription is granted. + public let listing: ListingRevision? + + public init(listing: ListingRevision? = nil) { + self.listing = listing + } + + private enum CodingKeys: String, CodingKey { + case listing = "listing" + } + } + + public struct GrantedEntityInput: AWSEncodableShape { + /// The listing for which a subscription is to be granted. + public let listing: ListingRevisionInput? + + public init(listing: ListingRevisionInput? = nil) { + self.listing = listing + } + + public func validate(name: String) throws { + try self.listing?.validate(name: "\(name).listing") + } + + private enum CodingKeys: String, CodingKey { + case listing = "listing" + } + } + + public struct ListingItem: AWSDecodableShape { + /// An asset published in an Amazon DataZone catalog. + public let assetListing: AssetListing? + + public init(assetListing: AssetListing? = nil) { + self.assetListing = assetListing + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + } + } + + public struct Model: AWSEncodableShape & AWSDecodableShape { + public let smithy: String? + + public init(smithy: String? = nil) { + self.smithy = smithy + } + + public func validate(name: String) throws { + try self.validate(self.smithy, name: "smithy", parent: name, max: 10000) + try self.validate(self.smithy, name: "smithy", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case smithy = "smithy" + } + } + + public struct ProvisioningProperties: AWSDecodableShape { + /// The cloud formation properties included as part of the provisioning properties of an environment blueprint. + public let cloudFormation: CloudFormationProperties? 
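// Editorial sketch (not part of the generated source): the Model shape above wraps a raw Smithy document and only enforces a length bound client-side. A minimal check using the validate(name:) method shown above; the `smithyDocument` string is a placeholder.
//
//     let model = DataZone.Model(smithy: smithyDocument)
//     try model.validate(name: "Model")   // throws if the string is empty or longer than 10000 characters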
+ + public init(cloudFormation: CloudFormationProperties? = nil) { + self.cloudFormation = cloudFormation + } + + private enum CodingKeys: String, CodingKey { + case cloudFormation = "cloudFormation" + } + } + + public struct SearchResultItem: AWSDecodableShape { + /// The asset listing included in the results of the SearchListings action. + public let assetListing: AssetListingItem? + + public init(assetListing: AssetListingItem? = nil) { + self.assetListing = assetListing + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + } + } + + public struct SubscribedListingItem: AWSDecodableShape { + /// The asset for which the subscription grant is created. + public let assetListing: SubscribedAssetListing? + + public init(assetListing: SubscribedAssetListing? = nil) { + self.assetListing = assetListing + } + + private enum CodingKeys: String, CodingKey { + case assetListing = "assetListing" + } + } + + public struct SubscribedPrincipal: AWSDecodableShape { + /// The project that has the subscription grant. + public let project: SubscribedProject? + + public init(project: SubscribedProject? = nil) { + self.project = project + } + + private enum CodingKeys: String, CodingKey { + case project = "project" + } + } + + public struct SubscribedPrincipalInput: AWSEncodableShape { + /// The project that is to be given a subscription grant. + public let project: SubscribedProjectInput? + + public init(project: SubscribedProjectInput? = nil) { + self.project = project + } + + public func validate(name: String) throws { + try self.project?.validate(name: "\(name).project") + } + + private enum CodingKeys: String, CodingKey { + case project = "project" + } + } +} + +// MARK: - Errors + +/// Error enum for DataZone +public struct DataZoneErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case unauthorizedException = "UnauthorizedException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize DataZone + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You do not have sufficient access to perform this action. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// There is a conflict while performing this action. + public static var conflictException: Self { .init(.conflictException) } + /// The request has failed because of an unknown error, exception or failure. + public static var internalServerException: Self { .init(.internalServerException) } + /// The specified resource cannot be found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The request has exceeded the specified service quota. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The request was denied due to request throttling. 
+ public static var throttlingException: Self { .init(.throttlingException) } + /// You do not have permission to perform this action. + public static var unauthorizedException: Self { .init(.unauthorizedException) } + /// The input fails to satisfy the constraints specified by the Amazon Web Services service. + public static var validationException: Self { .init(.validationException) } +} + +extension DataZoneErrorType: Equatable { + public static func == (lhs: DataZoneErrorType, rhs: DataZoneErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension DataZoneErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/EC2/EC2_api+async.swift b/Sources/Soto/Services/EC2/EC2_api+async.swift index c1b0e8323c..c9b79deeb5 100644 --- a/Sources/Soto/Services/EC2/EC2_api+async.swift +++ b/Sources/Soto/Services/EC2/EC2_api+async.swift @@ -142,7 +142,7 @@ extension EC2 { return try await self.client.execute(operation: "AssociateIpamResourceDiscovery", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide. + /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide. When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. public func associateNatGatewayAddress(_ input: AssociateNatGatewayAddressRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AssociateNatGatewayAddressResult { return try await self.client.execute(operation: "AssociateNatGatewayAddress", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -509,7 +509,7 @@ extension EC2 { return try await self.client.execute(operation: "CreateManagedPrefixList", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway. 
With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet. With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the Amazon VPC User Guide. + /// Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway. With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet. With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the Amazon VPC User Guide. When you create a public NAT gateway and assign it an EIP or secondary EIPs, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the NAT gateway will fail to launch. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. public func createNatGateway(_ input: CreateNatGatewayRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateNatGatewayResult { return try await self.client.execute(operation: "CreateNatGateway", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -1123,7 +1123,7 @@ extension EC2 { return try await self.client.execute(operation: "DeleteVolume", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on. + /// Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on. When you delete the VPC, it deletes the VPC's default security group, network ACL, and route table. public func deleteVpc(_ input: DeleteVpcRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws { return try await self.client.execute(operation: "DeleteVpc", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -1606,7 +1606,7 @@ extension EC2 { return try await self.client.execute(operation: "DescribeNetworkInterfacePermissions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Describes one or more of your network interfaces. + /// Describes one or more of your network interfaces. If you have a large number of network interfaces, the operation fails unless you use pagination or one of the following filters: group-id, mac-address, private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id. public func describeNetworkInterfaces(_ input: DescribeNetworkInterfacesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeNetworkInterfacesResult { return try await self.client.execute(operation: "DescribeNetworkInterfaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -2001,6 +2001,11 @@ extension EC2 { return try await self.client.execute(operation: "DisableFastSnapshotRestores", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Sets the AMI state to disabled and removes all launch permissions from the AMI. A disabled AMI can't be used for instance launches. A disabled AMI can't be shared. If a public or shared AMI was previously shared, it is made private. If an AMI was shared with an Amazon Web Services account, organization, or Organizational Unit, they lose access to the disabled AMI. A disabled AMI does not appear in DescribeImages API calls by default. Only the AMI owner can disable an AMI. You can re-enable a disabled AMI using EnableImage. For more information, see Disable an AMI in the Amazon EC2 User Guide. + public func disableImage(_ input: DisableImageRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DisableImageResult { + return try await self.client.execute(operation: "DisableImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region. The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide. public func disableImageBlockPublicAccess(_ input: DisableImageBlockPublicAccessRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> DisableImageBlockPublicAccessResult { return try await self.client.execute(operation: "DisableImageBlockPublicAccess", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -2155,6 +2160,11 @@ extension EC2 { return try await self.client.execute(operation: "EnableFastSnapshotRestores", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again. Only the AMI owner can re-enable a disabled AMI. For more information, see Disable an AMI in the Amazon EC2 User Guide. + public func enableImage(_ input: EnableImageRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> EnableImageResult { + return try await self.client.execute(operation: "EnableImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available. The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide. public func enableImageBlockPublicAccess(_ input: EnableImageBlockPublicAccessRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> EnableImageBlockPublicAccessResult { return try await self.client.execute(operation: "EnableImageBlockPublicAccess", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -4526,7 +4536,7 @@ extension EC2 { ) } - /// Describes one or more of your network interfaces. + /// Describes one or more of your network interfaces. If you have a large number of network interfaces, the operation fails unless you use pagination or one of the following filters: group-id, mac-address, private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index 964bedd856..a146dd3d94 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -207,7 +207,7 @@ public struct EC2: AWSService { return self.client.execute(operation: "AssociateIpamResourceDiscovery", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. 
For more information, see Elastic IP address quotas in the Amazon VPC User Guide. + /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide. When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. public func associateNatGatewayAddress(_ input: AssociateNatGatewayAddressRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "AssociateNatGatewayAddress", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -574,7 +574,7 @@ public struct EC2: AWSService { return self.client.execute(operation: "CreateManagedPrefixList", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway. With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet. With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the Amazon VPC User Guide. + /// Creates a NAT gateway in the specified subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. You can create either a public NAT gateway or a private NAT gateway. With a public NAT gateway, internet-bound traffic from a private subnet can be routed to the NAT gateway, so that instances in a private subnet can connect to the internet. With a private NAT gateway, private communication is routed across VPCs and on-premises networks through a transit gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the Amazon VPC User Guide. When you create a public NAT gateway and assign it an EIP or secondary EIPs, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. 
If it's not the same, the NAT gateway will fail to launch. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. public func createNatGateway(_ input: CreateNatGatewayRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateNatGateway", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -1188,7 +1188,7 @@ public struct EC2: AWSService { return self.client.execute(operation: "DeleteVolume", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on. + /// Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on. When you delete the VPC, it deletes the VPC's default security group, network ACL, and route table. @discardableResult public func deleteVpc(_ input: DeleteVpcRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteVpc", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -1671,7 +1671,7 @@ public struct EC2: AWSService { return self.client.execute(operation: "DescribeNetworkInterfacePermissions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Describes one or more of your network interfaces. + /// Describes one or more of your network interfaces. If you have a large number of network interfaces, the operation fails unless you use pagination or one of the following filters: group-id, mac-address, private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id. public func describeNetworkInterfaces(_ input: DescribeNetworkInterfacesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeNetworkInterfaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -2066,6 +2066,11 @@ public struct EC2: AWSService { return self.client.execute(operation: "DisableFastSnapshotRestores", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Sets the AMI state to disabled and removes all launch permissions from the AMI. A disabled AMI can't be used for instance launches. A disabled AMI can't be shared. If a public or shared AMI was previously shared, it is made private. 
If an AMI was shared with an Amazon Web Services account, organization, or Organizational Unit, they lose access to the disabled AMI. A disabled AMI does not appear in DescribeImages API calls by default. Only the AMI owner can disable an AMI. You can re-enable a disabled AMI using EnableImage. For more information, see Disable an AMI in the Amazon EC2 User Guide. + public func disableImage(_ input: DisableImageRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DisableImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region. The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide. public func disableImageBlockPublicAccess(_ input: DisableImageBlockPublicAccessRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DisableImageBlockPublicAccess", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -2220,6 +2225,11 @@ public struct EC2: AWSService { return self.client.execute(operation: "EnableFastSnapshotRestores", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again. Only the AMI owner can re-enable a disabled AMI. For more information, see Disable an AMI in the Amazon EC2 User Guide. + public func enableImage(_ input: EnableImageRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "EnableImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available. The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide. public func enableImageBlockPublicAccess(_ input: EnableImageBlockPublicAccessRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { return self.client.execute(operation: "EnableImageBlockPublicAccess", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -6459,7 +6469,7 @@ extension EC2 { ) } - /// Describes one or more of your network interfaces. + /// Describes one or more of your network interfaces. If you have a large number of network interfaces, the operation fails unless you use pagination or one of the following filters: group-id, mac-address, private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. @@ -10806,6 +10816,7 @@ extension EC2.DescribeImagesRequest: AWSPaginateToken { filters: self.filters, imageIds: self.imageIds, includeDeprecated: self.includeDeprecated, + includeDisabled: self.includeDisabled, maxResults: self.maxResults, nextToken: token, owners: self.owners diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index 35aaf7c048..71702b09f0 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -329,6 +329,7 @@ extension EC2 { case rhelWithSqlServerStandard = "RHEL with SQL Server Standard" case rhelWithSqlServerWeb = "RHEL with SQL Server Web" case suseLinux = "SUSE Linux" + case ubuntuProLinux = "Ubuntu Pro" case windows = "Windows" case windowsWithSqlServer = "Windows with SQL Server" case windowsWithSqlServerEnterprise = "Windows with SQL Server Enterprise" @@ -880,6 +881,7 @@ extension EC2 { public enum ImageState: String, CustomStringConvertible, Codable, Sendable { case available = "available" case deregistered = "deregistered" + case disabled = "disabled" case error = "error" case failed = "failed" case invalid = "invalid" @@ -3917,7 +3919,7 @@ extension EC2 { public let domain: DomainType? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. Use DescribeAvailabilityZones to view the network border groups. You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 Classic, you receive an InvalidParameterCombination error. + /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. Use DescribeAvailabilityZones to view the network border groups. public let networkBorderGroup: String? /// The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead. public let publicIpv4Pool: String? @@ -6271,7 +6273,7 @@ extension EC2 { } public struct CancelExportTaskRequest: AWSEncodableShape { - /// The ID of the export task. This is the ID returned by CreateInstanceExportTask. + /// The ID of the export task. 
This is the ID returned by the CreateInstanceExportTask and ExportImage operations. public let exportTaskId: String? public init(exportTaskId: String? = nil) { @@ -8905,7 +8907,7 @@ extension EC2 { public let replaceUnhealthyInstances: Bool? /// Describes the configuration of Spot Instances in an EC2 Fleet. public let spotOptions: SpotOptionsRequest? - /// The key-value pair for tagging the EC2 Fleet request on creation. For more information, see Tagging your resources. If the fleet type is instant, specify a resource type of fleet to tag the fleet or instance to tag the instances at launch. If the fleet type is maintain or request, specify a resource type of fleet to tag the fleet. You cannot specify a resource type of instance. To tag instances at launch, specify the tags in a launch template. + /// The key-value pair for tagging the EC2 Fleet request on creation. For more information, see Tag your resources. If the fleet type is instant, specify a resource type of fleet to tag the fleet or instance to tag the instances at launch. If the fleet type is maintain or request, specify a resource type of fleet to tag the fleet. You cannot specify a resource type of instance. To tag instances at launch, specify the tags in a launch template. @OptionalCustomCoding> public var tagSpecifications: [TagSpecification]? /// The number of units to request. @@ -17881,7 +17883,7 @@ extension EC2 { /// or all (public AMIs). If you specify an Amazon Web Services account ID that is not your own, only AMIs shared with that specific Amazon Web Services account ID are returned. However, AMIs that are shared with the account’s organization or organizational unit (OU) are not returned. If you specify self or your own Amazon Web Services account ID, AMIs shared with your account are returned. In addition, AMIs that are shared with the organization or OU of which you are member are also returned. If you specify all, all public AMIs are returned. @OptionalCustomCoding> public var executableUsers: [String]? - /// The filters. architecture - The image architecture (i386 | x86_64 | arm64 | x86_64_mac | arm64_mac). block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume. block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB. block-device-mapping.volume-type - The volume type of the Amazon EBS volume (io1 | io2 | gp2 | gp3 | sc1 | st1 | standard). block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted. creation-date - The time when the image was created, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. description - The description of the image (provided during image creation). ena-support - A Boolean that indicates whether enhanced networking with ENA is enabled. hypervisor - The hypervisor type (ovm | xen). image-id - The ID of the image. image-type - The image type (machine | kernel | ramdisk). is-public - A Boolean that indicates whether the image is public. kernel-id - The kernel ID. manifest-location - The location of the image manifest. name - The name of the AMI (provided during image creation). 
owner-alias - The owner alias (amazon | aws-marketplace). The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set using the IAM console. We recommend that you use the Owner request parameter instead of this filter. owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the Owner request parameter instead of this filter. platform - The platform. The only supported value is windows. product-code - The product code. product-code.type - The type of the product code (marketplace). ramdisk-id - The RAM disk ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). state - The state of the image (available | pending | failed). state-reason-code - The reason code for the state change. state-reason-message - The message for the state change. sriov-net-support - A value of simple indicates that enhanced networking with the Intel 82599 VF interface is enabled. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. virtualization-type - The virtualization type (paravirtual | hvm). + /// The filters. architecture - The image architecture (i386 | x86_64 | arm64 | x86_64_mac | arm64_mac). block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume. block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB. block-device-mapping.volume-type - The volume type of the Amazon EBS volume (io1 | io2 | gp2 | gp3 | sc1 | st1 | standard). block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted. creation-date - The time when the image was created, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. description - The description of the image (provided during image creation). ena-support - A Boolean that indicates whether enhanced networking with ENA is enabled. hypervisor - The hypervisor type (ovm | xen). image-id - The ID of the image. image-type - The image type (machine | kernel | ramdisk). is-public - A Boolean that indicates whether the image is public. kernel-id - The kernel ID. manifest-location - The location of the image manifest. name - The name of the AMI (provided during image creation). owner-alias - The owner alias (amazon | aws-marketplace). The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set using the IAM console. We recommend that you use the Owner request parameter instead of this filter. owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the Owner request parameter instead of this filter. 
platform - The platform. The only supported value is windows. product-code - The product code. product-code.type - The type of the product code (marketplace). ramdisk-id - The RAM disk ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-instance-id - The ID of the instance that the AMI was created from if the AMI was created using CreateImage. This filter is applicable only if the AMI was created using CreateImage. state - The state of the image (available | pending | failed). state-reason-code - The reason code for the state change. state-reason-message - The message for the state change. sriov-net-support - A value of simple indicates that enhanced networking with the Intel 82599 VF interface is enabled. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. virtualization-type - The virtualization type (paravirtual | hvm). @OptionalCustomCoding> public var filters: [Filter]? /// The image IDs. Default: Describes all images available to you. @@ -17889,6 +17891,8 @@ extension EC2 { public var imageIds: [String]? /// Specifies whether to include deprecated AMIs. Default: No deprecated AMIs are included in the response. If you are the AMI owner, all deprecated AMIs appear in the response regardless of what you specify for this parameter. public let includeDeprecated: Bool? + /// Specifies whether to include disabled AMIs. Default: No disabled AMIs are included in the response. + public let includeDisabled: Bool? /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// For more information, see Pagination. public let maxResults: Int? @@ -17898,12 +17902,13 @@ extension EC2 { @OptionalCustomCoding> public var owners: [String]? - public init(dryRun: Bool? = nil, executableUsers: [String]? = nil, filters: [Filter]? = nil, imageIds: [String]? = nil, includeDeprecated: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, owners: [String]? = nil) { + public init(dryRun: Bool? = nil, executableUsers: [String]? = nil, filters: [Filter]? = nil, imageIds: [String]? = nil, includeDeprecated: Bool? = nil, includeDisabled: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, owners: [String]? = nil) { self.dryRun = dryRun self.executableUsers = executableUsers self.filters = filters self.imageIds = imageIds self.includeDeprecated = includeDeprecated + self.includeDisabled = includeDisabled self.maxResults = maxResults self.nextToken = nextToken self.owners = owners @@ -17915,6 +17920,7 @@ extension EC2 { case filters = "Filter" case imageIds = "ImageId" case includeDeprecated = "IncludeDeprecated" + case includeDisabled = "IncludeDisabled" case maxResults = "MaxResults" case nextToken = "NextToken" case owners = "Owner" @@ -18458,7 +18464,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. 
Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. 
instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. license-pool - maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. 
network-interface.availability-zone - The Availability Zone for the network interface. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). 
reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. 
capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). 
metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. 
network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. 
private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. @OptionalCustomCoding> public var filters: [Filter]? /// The instance IDs. Default: Describes all your instances. 
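The filter list above is easiest to read next to a concrete call. The following is a minimal sketch, not part of this diff, combining two of the documented filters (image-id and instance-state-name) in a DescribeInstances request through the generated Soto EC2 client; the client setup, region, and AMI ID are illustrative assumptions rather than values from this change.

    // Minimal sketch: filter running instances launched from one AMI.
    // Client configuration and the AMI ID below are assumptions for illustration.
    import SotoEC2

    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let ec2 = EC2(client: client, region: .useast1)

    let request = EC2.DescribeInstancesRequest(filters: [
        .init(name: "image-id", values: ["ami-0123456789abcdef0"]),      // hypothetical AMI
        .init(name: "instance-state-name", values: ["running"])
    ])
    let reservations = try ec2.describeInstances(request).wait().reservations ?? []
    print("Matched \(reservations.flatMap { $0.instances ?? [] }.count) running instances")

For large result sets the generated paginator variant mentioned earlier in this diff (with the onPage closure) is the safer choice, since unfiltered, unpaginated DescribeNetworkInterfaces/DescribeInstances calls can fail or truncate on large accounts.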
@@ -20109,12 +20115,22 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// One or more filters. addresses.private-ip-address - The private IPv4 addresses associated with the network interface. addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface. addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4). addresses.association.owner-id - The owner ID of the addresses associated with the network interface. association.association-id - The association ID returned when the network interface was associated with an IPv4 address. association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. association.public-dns-name - The public DNS name for the network interface (IPv4). attachment.attachment-id - The ID of the interface attachment. attachment.attach-time - The time that the network interface was attached to an instance. attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated. attachment.device-index - The device index to which the network interface is attached. attachment.instance-id - The ID of the instance to which the network interface is attached. attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. attachment.status - The status of the attachment (attaching | attached | detaching | detached). availability-zone - The Availability Zone of the network interface. description - The description of the network interface. group-id - The ID of a security group associated with the network interface. group-name - The name of a security group associated with the network interface. ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface. interface-type - The type of network interface (api_gateway_managed | - /// aws_codestar_connections_managed | branch | efa | - /// gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | - /// interface | iot_rules_managed | lambda | load_balancer | - /// nat_gateway | network_load_balancer | quicksight | - /// transit_gateway | trunk | vpc_endpoint). mac-address - The MAC address of the network interface. network-interface-id - The ID of the network interface. owner-id - The Amazon Web Services account ID of the network interface owner. private-ip-address - The private IPv4 address or addresses of the network interface. private-dns-name - The private DNS name of the network interface (IPv4). requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service + /// One or more filters. association.allocation-id - The allocation ID returned when you + /// allocated the Elastic IP address (IPv4) for your network interface. 
association.association-id - The association ID returned when the + /// network interface was associated with an IPv4 address. addresses.association.owner-id - The owner ID of the addresses associated with the network interface. addresses.association.public-ip - The association ID returned when + /// the network interface was associated with the Elastic IP address + /// (IPv4). addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface. addresses.private-ip-address - The private IPv4 addresses + /// associated with the network interface. association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. association.public-dns-name - The public DNS name for the network interface (IPv4). attachment.attach-time - The time that the network interface was attached to an instance. attachment.attachment-id - The ID of the interface attachment. attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated. attachment.device-index - The device index to which the network interface is attached. attachment.instance-id - The ID of the instance to which the network interface is attached. attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. attachment.status - The status of the attachment (attaching | attached | detaching | detached). availability-zone - The Availability Zone of the network interface. description - The description of the network interface. group-id - The ID of a security group associated with the network interface. ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface. interface-type - The type of network interface (api_gateway_managed | + /// aws_codestar_connections_managed | branch | + /// ec2_instance_connect_endpoint | efa | efs | + /// gateway_load_balancer | gateway_load_balancer_endpoint | + /// global_accelerator_managed | + /// interface | iot_rules_managed | + /// lambda | load_balancer | + /// nat_gateway | network_load_balancer | + /// quicksight | + /// transit_gateway | trunk | + /// vpc_endpoint). mac-address - The MAC address of the network interface. network-interface-id - The ID of the network interface. owner-id - The Amazon Web Services account ID of the network interface owner. private-dns-name - The private DNS name of the network interface (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service /// (for example, Amazon Web Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates whether the network interface performs source/destination checking. /// A value of true means checking is enabled, and false means checking is disabled. /// The value must be false for the network interface to perform network address translation (NAT) in your VPC. status - The status of the network interface. If the network interface is not attached to an instance, the status is available; @@ -21631,14 +21647,14 @@ extension EC2 { /// and provides an error response. If you have the required permissions, the error response is /// DryRunOperation. 
Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. task-state - Returns tasks in a certain state (InProgress | Completed | Failed) bucket - Returns task information for tasks that targeted a specific bucket. For the filter value, specify the bucket name. + /// The filters. task-state - Returns tasks in a certain state (InProgress | Completed | Failed) bucket - Returns task information for tasks that targeted a specific bucket. For the filter value, specify the bucket name. When you specify the ImageIds parameter, any filters that you specify are ignored. To use the filters, you must remove the ImageIds parameter. @OptionalCustomCoding> public var filters: [Filter]? /// The AMI IDs for which to show progress. Up to 20 AMI IDs can be included in a request. @OptionalCustomCoding> public var imageIds: [String]? /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. - /// For more information, see Pagination. You cannot specify this parameter and the ImageIDs parameter in the same call. + /// For more information, see Pagination. You cannot specify this parameter and the ImageIds parameter in the same call. public let maxResults: Int? /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? @@ -24511,6 +24527,38 @@ extension EC2 { } } + public struct DisableImageRequest: AWSEncodableShape { + /// Checks whether you have the required permissions for the action, without actually making the request, + /// and provides an error response. If you have the required permissions, the error response is + /// DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the AMI. + public let imageId: String? + + public init(dryRun: Bool? = nil, imageId: String? = nil) { + self.dryRun = dryRun + self.imageId = imageId + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case imageId = "ImageId" + } + } + + public struct DisableImageResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. + public let `return`: Bool? + + public init(return: Bool? = nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct DisableIpamOrganizationAdminAccountRequest: AWSEncodableShape { /// The Organizations member account ID that you want to disable as IPAM account. public let delegatedAdminAccountId: String? @@ -26182,6 +26230,38 @@ extension EC2 { } } + public struct EnableImageRequest: AWSEncodableShape { + /// Checks whether you have the required permissions for the action, without actually making the request, + /// and provides an error response. If you have the required permissions, the error response is + /// DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the AMI. + public let imageId: String? + + public init(dryRun: Bool? = nil, imageId: String? = nil) { + self.dryRun = dryRun + self.imageId = imageId + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case imageId = "ImageId" + } + } + + public struct EnableImageResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. + public let `return`: Bool? + + public init(return: Bool? 
= nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct EnableIpamOrganizationAdminAccountRequest: AWSEncodableShape { /// The Organizations member account ID that you want to enable as the IPAM account. public let delegatedAdminAccountId: String? @@ -30756,7 +30836,7 @@ extension EC2 { public let description: String? /// Specifies whether enhanced networking with ENA is enabled. public let enaSupport: Bool? - /// The hypervisor type of the image. + /// The hypervisor type of the image. Only xen is supported. ovm is not supported. public let hypervisor: HypervisorType? /// The ID of the AMI. public let imageId: String? @@ -30791,6 +30871,8 @@ extension EC2 { public let rootDeviceName: String? /// The type of root device used by the AMI. The AMI can use an Amazon EBS volume or an instance store volume. public let rootDeviceType: DeviceType? + /// The ID of the instance that the AMI was created from if the AMI was created using CreateImage. This field only appears if the AMI was created using CreateImage. + public let sourceInstanceId: String? /// Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled. public let sriovNetSupport: String? /// The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance. @@ -30807,7 +30889,7 @@ extension EC2 { /// The type of virtualization of the AMI. public let virtualizationType: VirtualizationType? - public init(architecture: ArchitectureValues? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, bootMode: BootModeValues? = nil, creationDate: String? = nil, deprecationTime: String? = nil, description: String? = nil, enaSupport: Bool? = nil, hypervisor: HypervisorType? = nil, imageId: String? = nil, imageLocation: String? = nil, imageOwnerAlias: String? = nil, imageType: ImageTypeValues? = nil, imdsSupport: ImdsSupportValues? = nil, kernelId: String? = nil, name: String? = nil, ownerId: String? = nil, platform: PlatformValues? = nil, platformDetails: String? = nil, productCodes: [ProductCode]? = nil, public: Bool? = nil, ramdiskId: String? = nil, rootDeviceName: String? = nil, rootDeviceType: DeviceType? = nil, sriovNetSupport: String? = nil, state: ImageState? = nil, stateReason: StateReason? = nil, tags: [Tag]? = nil, tpmSupport: TpmSupportValues? = nil, usageOperation: String? = nil, virtualizationType: VirtualizationType? = nil) { + public init(architecture: ArchitectureValues? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, bootMode: BootModeValues? = nil, creationDate: String? = nil, deprecationTime: String? = nil, description: String? = nil, enaSupport: Bool? = nil, hypervisor: HypervisorType? = nil, imageId: String? = nil, imageLocation: String? = nil, imageOwnerAlias: String? = nil, imageType: ImageTypeValues? = nil, imdsSupport: ImdsSupportValues? = nil, kernelId: String? = nil, name: String? = nil, ownerId: String? = nil, platform: PlatformValues? = nil, platformDetails: String? = nil, productCodes: [ProductCode]? = nil, public: Bool? = nil, ramdiskId: String? = nil, rootDeviceName: String? = nil, rootDeviceType: DeviceType? = nil, sourceInstanceId: String? = nil, sriovNetSupport: String? = nil, state: ImageState? = nil, stateReason: StateReason? = nil, tags: [Tag]? = nil, tpmSupport: TpmSupportValues? = nil, usageOperation: String? = nil, virtualizationType: VirtualizationType? 
= nil) { self.architecture = architecture self.blockDeviceMappings = blockDeviceMappings self.bootMode = bootMode @@ -30831,6 +30913,7 @@ extension EC2 { self.ramdiskId = ramdiskId self.rootDeviceName = rootDeviceName self.rootDeviceType = rootDeviceType + self.sourceInstanceId = sourceInstanceId self.sriovNetSupport = sriovNetSupport self.state = state self.stateReason = stateReason @@ -30864,6 +30947,7 @@ extension EC2 { case ramdiskId = "ramdiskId" case rootDeviceName = "rootDeviceName" case rootDeviceType = "rootDeviceType" + case sourceInstanceId = "sourceInstanceId" case sriovNetSupport = "sriovNetSupport" case state = "imageState" case stateReason = "stateReason" @@ -31072,7 +31156,7 @@ extension EC2 { /// The architecture of the virtual machine. Valid values: i386 | x86_64 public let architecture: String? - /// The boot mode of the virtual machine. + /// The boot mode of the virtual machine. The uefi-preferred boot mode isn't supported for importing images. For more information, see Boot modes in the VM Import/Export User Guide. public let bootMode: BootModeValues? /// The client-specific data. public let clientData: ClientData? @@ -35833,7 +35917,7 @@ extension EC2 { public struct LaunchTemplateTagSpecificationRequest: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The type of resource to tag. The Valid Values are all the resource types that can be tagged. However, when creating a launch template, you can specify tags for the following resource types only: instance | volume | elastic-gpu | network-interface | spot-instances-request To tag a resource after it has been created, see CreateTags. + /// The type of resource to tag. Valid Values lists all resource types for Amazon EC2 that can be tagged. When you create a launch template, you can specify tags for the following resource types only: instance | volume | elastic-gpu | network-interface | spot-instances-request. If the instance does not include the resource type that you specify, the instance launch fails. For example, not all instance types include an Elastic GPU. To tag a resource after it has been created, see CreateTags. public let resourceType: ResourceType? /// The tags to apply to the resource. @OptionalCustomCoding> @@ -41112,7 +41196,7 @@ extension EC2 { public let allocationStrategy: FleetOnDemandAllocationStrategy? /// The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant. public let capacityReservationOptions: CapacityReservationOptions? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. public let maxTotalPrice: String? /// The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int?
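The DisableImageRequest/EnableImageRequest shapes and the new includeDisabled flag added earlier in this diff combine into a simple disable/inspect/re-enable round trip. Below is a minimal sketch, not part of this diff, assuming a client configured as in the previous sketch and a hypothetical AMI ID that you own.

    // Minimal sketch: disable an AMI, confirm its state, then re-enable it.
    import SotoEC2

    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let ec2 = EC2(client: client, region: .useast1)

    let amiId = "ami-0123456789abcdef0"   // hypothetical AMI owned by this account

    // Disable the AMI; it can no longer be used to launch instances or be shared.
    _ = try ec2.disableImage(.init(imageId: amiId)).wait()

    // Disabled AMIs are hidden from DescribeImages unless includeDisabled is set.
    let images = try ec2.describeImages(.init(imageIds: [amiId], includeDisabled: true)).wait().images ?? []
    print(images.first?.state ?? .invalid)   // expected to report the new disabled state

    // Re-enable the AMI so it is available for launches and sharing again.
    _ = try ec2.enableImage(.init(imageId: amiId)).wait()

As the doc comments above note, re-enabling does not automatically restore access for accounts, organizations, or OUs that lost access while the AMI was disabled; sharing has to be granted again.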
@@ -41145,7 +41229,7 @@ extension EC2 { public let allocationStrategy: FleetOnDemandAllocationStrategy? /// The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant. public let capacityReservationOptions: CapacityReservationOptionsRequest? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. public let maxTotalPrice: String? /// The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? @@ -43182,7 +43266,7 @@ extension EC2 { public let allocationId: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. If you provide an incorrect network border group, you receive an InvalidAddress.NotFound error. You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 classic, you receive an InvalidParameterCombination error. + /// The set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. If you provide an incorrect network border group, you receive an InvalidAddress.NotFound error. public let networkBorderGroup: String? /// Deprecated. public let publicIp: String? @@ -43775,7 +43859,7 @@ extension EC2 { /// An elastic GPU to associate with the instance. @OptionalCustomCoding> public var elasticGpuSpecifications: [ElasticGpuSpecification]? - /// The elastic inference accelerator for the instance. + /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. @OptionalCustomCoding> public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? 
/// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. @@ -44847,7 +44931,7 @@ extension EC2 { /// The elastic GPU specification. @OptionalCustomCoding> public var elasticGpuSpecifications: [ElasticGpuSpecificationResponse]? - /// The elastic inference accelerator for the instance. + /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. @OptionalCustomCoding> public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAcceleratorResponse]? /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. @@ -47587,7 +47671,7 @@ extension EC2 { public let onDemandAllocationStrategy: OnDemandAllocationStrategy? /// The number of On-Demand units fulfilled by this request compared to the set target On-Demand capacity. public let onDemandFulfilledCapacity: Double? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. public let onDemandMaxTotalPrice: String? /// The number of On-Demand units to request. 
You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. public let onDemandTargetCapacity: Int? @@ -47595,11 +47679,11 @@ extension EC2 { public let replaceUnhealthyInstances: Bool? /// The strategies for managing your Spot Instances that are at an elevated risk of being interrupted. public let spotMaintenanceStrategies: SpotMaintenanceStrategies? - /// The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotdMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. + /// The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. public let spotMaxTotalPrice: String? /// The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. public let spotPrice: String? - /// The key-value pair for tagging the Spot Fleet request on creation. The value for ResourceType must be spot-fleet-request, otherwise the Spot Fleet request fails. To tag instances at launch, specify the tags in the launch template (valid only if you use LaunchTemplateConfigs) or in the SpotFleetTagSpecification (valid only if you use LaunchSpecifications). For information about tagging after launch, see Tagging Your Resources. + /// The key-value pair for tagging the Spot Fleet request on creation. The value for ResourceType must be spot-fleet-request, otherwise the Spot Fleet request fails. To tag instances at launch, specify the tags in the launch template (valid only if you use LaunchTemplateConfigs) or in the SpotFleetTagSpecification (valid only if you use LaunchSpecifications). For information about tagging after launch, see Tag your resources. @OptionalCustomCoding> public var tagSpecifications: [TagSpecification]? 
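To make the tagging rule above concrete, here is a hedged sketch of a tag specification for the Spot Fleet request itself; the EC2.TagSpecification and EC2.Tag initializers and the .spotFleetRequest case are assumed from Soto's usual code generation rather than taken from this diff.

    import SotoEC2

    // Tags the Spot Fleet request on creation; ResourceType must be spot-fleet-request,
    // otherwise the request fails, as the updated comment above notes.
    let fleetRequestTags = EC2.TagSpecification(
        resourceType: .spotFleetRequest,
        tags: [EC2.Tag(key: "cost-center", value: "12345")]
    )
    // Instance-level tags go in the launch template (LaunchTemplateConfigs) or in
    // SpotFleetTagSpecification (LaunchSpecifications) instead.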
/// The number of units to request for the Spot Fleet. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. @@ -47881,7 +47965,7 @@ extension EC2 { public let instancePoolsToUseCount: Int? /// The strategies for managing your workloads on your Spot Instances that will be interrupted. Currently only the capacity rebalance strategy is available. public let maintenanceStrategies: FleetSpotMaintenanceStrategies? - /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. + /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. public let maxTotalPrice: String? /// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? @@ -47922,7 +48006,7 @@ extension EC2 { public let instancePoolsToUseCount: Int? /// The strategies for managing your Spot Instances that are at an elevated risk of being interrupted. public let maintenanceStrategies: FleetSpotMaintenanceStrategiesRequest? - /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. + /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. 
For more information, see Surplus credits can incur charges in the EC2 User Guide. public let maxTotalPrice: String? /// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api+async.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api+async.swift index dcdfb4a5f8..52c51936cb 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api+async.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api+async.swift @@ -186,7 +186,7 @@ extension ElasticLoadBalancingV2 { return try await self.client.execute(operation: "SetSecurityGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer or Network Load Balancer. The specified subnets replace the previously enabled subnets. When you specify subnets for a Network Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets. + /// Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer, Network Load Balancer or Gateway Load Balancer. The specified subnets replace the previously enabled subnets. When you specify subnets for a Network Load Balancer, or Gateway Load Balancer you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets. public func setSubnets(_ input: SetSubnetsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SetSubnetsOutput { return try await self.client.execute(operation: "SetSubnets", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift index 20deffbe50..d0a0491217 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_api.swift @@ -239,7 +239,7 @@ public struct ElasticLoadBalancingV2: AWSService { return self.client.execute(operation: "SetSecurityGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer or Network Load Balancer. The specified subnets replace the previously enabled subnets. When you specify subnets for a Network Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets. + /// Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer, Network Load Balancer or Gateway Load Balancer. The specified subnets replace the previously enabled subnets. 
When you specify subnets for a Network Load Balancer, or Gateway Load Balancer you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets. public func setSubnets(_ input: SetSubnetsInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "SetSubnets", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift index 59c49272c5..048e4e8cc3 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift @@ -1522,7 +1522,7 @@ extension ElasticLoadBalancingV2 { } public struct LoadBalancerAttribute: AWSEncodableShape & AWSDecodableShape { - /// The name of the attribute. The following attributes are supported by all load balancers: deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false. load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed. The following attributes are supported by both Application Load Balancers and Network Load Balancers: access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false. access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs. ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway. The following attributes are supported by only Application Load Balancers: idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds. routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive. routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false. routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false. 
routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false. routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false. routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append. If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets. If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change. If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets. routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false. + /// The name of the attribute. The following attributes are supported by all load balancers: deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false. load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed. The following attributes are supported by both Application Load Balancers and Network Load Balancers: access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false. access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs. ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway. 
The following attributes are supported by only Application Load Balancers: idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds. routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive. routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false. routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false. routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false. routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false. routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append. If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets. If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change. If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets. routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false. The following attributes are supported by only Network Load Balancers: dns_record.client_routing_policy - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity. public let key: String? /// The value of the attribute. public let value: String? 
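The new dns_record.client_routing_policy attribute documented above is set like any other load balancer attribute. A minimal sketch, assuming an already-configured ElasticLoadBalancingV2 service object and that ModifyLoadBalancerAttributesInput follows Soto's usual generated initializer:

    import SotoElasticLoadBalancingV2

    // Opts a Network Load Balancer into full zonal affinity for DNS-based client routing.
    func enableZonalAffinity(_ elbv2: ElasticLoadBalancingV2, loadBalancerArn: String) async throws {
        let input = ElasticLoadBalancingV2.ModifyLoadBalancerAttributesInput(
            attributes: [.init(key: "dns_record.client_routing_policy",
                               value: "availability_zone_affinity")],
            loadBalancerArn: loadBalancerArn
        )
        _ = try await elbv2.modifyLoadBalancerAttributes(input)
    }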
@@ -2211,14 +2211,14 @@ extension ElasticLoadBalancingV2 { } public struct SetSubnetsInput: AWSEncodableShape { - /// [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. + /// [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The Amazon Resource Name (ARN) of the load balancer. public let loadBalancerArn: String - /// The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. + /// The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. @OptionalCustomCoding public var subnetMappings: [SubnetMapping]? - /// The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. + /// The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. 
[Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. @OptionalCustomCoding public var subnets: [String]? @@ -2241,7 +2241,7 @@ extension ElasticLoadBalancingV2 { /// Information about the subnets. @OptionalCustomCoding public var availabilityZones: [AvailabilityZone]? - /// [Network Load Balancers] The IP address type. + /// [Network Load Balancers] The IP address type. [Gateway Load Balancers] The IP address type. public let ipAddressType: IpAddressType? public init(availabilityZones: [AvailabilityZone]? = nil, ipAddressType: IpAddressType? = nil) { diff --git a/Sources/Soto/Services/FSx/FSx_api+async.swift b/Sources/Soto/Services/FSx/FSx_api+async.swift index c8c4e13010..f3f4a924e4 100644 --- a/Sources/Soto/Services/FSx/FSx_api+async.swift +++ b/Sources/Soto/Services/FSx/FSx_api+async.swift @@ -186,6 +186,11 @@ extension FSx { return try await self.client.execute(operation: "RestoreVolumeFromSnapshot", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// After performing steps to repair the Active Directory configuration of an FSx for Windows File Server file system, use this action to initiate the process of Amazon FSx attempting to reconnect to the file system. + public func startMisconfiguredStateRecovery(_ input: StartMisconfiguredStateRecoveryRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartMisconfiguredStateRecoveryResponse { + return try await self.client.execute(operation: "StartMisconfiguredStateRecovery", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Tags an Amazon FSx resource. public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> TagResourceResponse { return try await self.client.execute(operation: "TagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) diff --git a/Sources/Soto/Services/FSx/FSx_api.swift b/Sources/Soto/Services/FSx/FSx_api.swift index b128f423a2..59816bb277 100644 --- a/Sources/Soto/Services/FSx/FSx_api.swift +++ b/Sources/Soto/Services/FSx/FSx_api.swift @@ -240,6 +240,11 @@ public struct FSx: AWSService { return self.client.execute(operation: "RestoreVolumeFromSnapshot", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// After performing steps to repair the Active Directory configuration of an FSx for Windows File Server file system, use this action to initiate the process of Amazon FSx attempting to reconnect to the file system. + public func startMisconfiguredStateRecovery(_ input: StartMisconfiguredStateRecoveryRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "StartMisconfiguredStateRecovery", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Tags an Amazon FSx resource. 
public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "TagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) diff --git a/Sources/Soto/Services/FSx/FSx_shapes.swift b/Sources/Soto/Services/FSx/FSx_shapes.swift index 5c093d36ae..5786e23ac5 100644 --- a/Sources/Soto/Services/FSx/FSx_shapes.swift +++ b/Sources/Soto/Services/FSx/FSx_shapes.swift @@ -31,6 +31,7 @@ extension FSx { case fileSystemAliasDisassociation = "FILE_SYSTEM_ALIAS_DISASSOCIATION" case fileSystemUpdate = "FILE_SYSTEM_UPDATE" case iopsOptimization = "IOPS_OPTIMIZATION" + case misconfiguredStateRecovery = "MISCONFIGURED_STATE_RECOVERY" case releaseNfsV3Locks = "RELEASE_NFS_V3_LOCKS" case snapshotUpdate = "SNAPSHOT_UPDATE" case storageOptimization = "STORAGE_OPTIMIZATION" @@ -4986,6 +4987,42 @@ extension FSx { } } + public struct StartMisconfiguredStateRecoveryRequest: AWSEncodableShape { + public let clientRequestToken: String? + public let fileSystemId: String + + public init(clientRequestToken: String? = StartMisconfiguredStateRecoveryRequest.idempotencyToken(), fileSystemId: String) { + self.clientRequestToken = clientRequestToken + self.fileSystemId = fileSystemId + } + + public func validate(name: String) throws { + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$") + try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21) + try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11) + try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$") + } + + private enum CodingKeys: String, CodingKey { + case clientRequestToken = "ClientRequestToken" + case fileSystemId = "FileSystemId" + } + } + + public struct StartMisconfiguredStateRecoveryResponse: AWSDecodableShape { + public let fileSystem: FileSystem? + + public init(fileSystem: FileSystem? = nil) { + self.fileSystem = fileSystem + } + + private enum CodingKeys: String, CodingKey { + case fileSystem = "FileSystem" + } + } + public struct StorageVirtualMachine: AWSDecodableShape { /// Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable. public let activeDirectoryConfiguration: SvmActiveDirectoryConfiguration? diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 7c3230878b..4c50e15852 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -647,7 +647,9 @@ extension Glue { public enum SourceControlProvider: String, CustomStringConvertible, Codable, Sendable { case awsCodeCommit = "AWS_CODE_COMMIT" + case bitbucket = "BITBUCKET" case github = "GITHUB" + case gitlab = "GITLAB" public var description: String { return self.rawValue } } @@ -20233,9 +20235,9 @@ extension Glue { public let folder: String? /// The name of the Glue job to be synchronized to or from the remote repository. public let jobName: String? - /// The provider for the remote repository. + /// The provider for the remote repository. Possible values: GITHUB, AWS_CODE_COMMIT, GITLAB, BITBUCKET. 
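Before the Glue hunks continue, here is a hedged usage sketch for the new FSx operation added above. It assumes an already-configured FSx service object; the request shape and its fs- ID pattern come from the StartMisconfiguredStateRecoveryRequest validation shown in this diff, while the lifecycle property on FileSystem is assumed.

    import SotoFSx

    // After repairing the file system's Active Directory configuration, ask Amazon FSx
    // to attempt reconnection. The client request token defaults to an idempotency token.
    func retryActiveDirectoryRecovery(_ fsx: FSx, fileSystemId: String) async throws {
        let request = FSx.StartMisconfiguredStateRecoveryRequest(fileSystemId: fileSystemId)
        let response = try await fsx.startMisconfiguredStateRecovery(request)
        print(String(describing: response.fileSystem?.lifecycle))  // lifecycle is assumed to exist on FileSystem
    }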
public let provider: SourceControlProvider? - /// The name of the remote repository that contains the job artifacts. + /// The name of the remote repository that contains the job artifacts. For BitBucket providers, RepositoryName should include WorkspaceName. Use the format /. public let repositoryName: String? /// The owner of the remote repository that contains the job artifacts. public let repositoryOwner: String? @@ -20600,9 +20602,9 @@ extension Glue { public let folder: String? /// The name of the Glue job to be synchronized to or from the remote repository. public let jobName: String? - /// The provider for the remote repository. + /// The provider for the remote repository. Possible values: GITHUB, AWS_CODE_COMMIT, GITLAB, BITBUCKET. public let provider: SourceControlProvider? - /// The name of the remote repository that contains the job artifacts. + /// The name of the remote repository that contains the job artifacts. For BitBucket providers, RepositoryName should include WorkspaceName. Use the format /. public let repositoryName: String? /// The owner of the remote repository that contains the job artifacts. public let repositoryOwner: String? diff --git a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift index 1f929327cc..2a0ab501c8 100644 --- a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift +++ b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift @@ -617,32 +617,56 @@ extension IVSRealTime { public struct Participant: AWSDecodableShape { /// Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information. public let attributes: [String: String]? + /// The participant’s browser. + public let browserName: String? + /// The participant’s browser version. + public let browserVersion: String? /// ISO 8601 timestamp (returned as a string) when the participant first joined the stage session. @OptionalCustomCoding public var firstJoinTime: Date? + /// The participant’s Internet Service Provider. + public let ispName: String? + /// The participant’s operating system. + public let osName: String? + /// The participant’s operating system version. + public let osVersion: String? /// Unique identifier for this participant, assigned by IVS. public let participantId: String? /// Whether the participant ever published to the stage session. public let published: Bool? + /// The participant’s SDK version. + public let sdkVersion: String? /// Whether the participant is connected to or disconnected from the stage. public let state: ParticipantState? /// Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information. public let userId: String? - public init(attributes: [String: String]? = nil, firstJoinTime: Date? = nil, participantId: String? = nil, published: Bool? = nil, state: ParticipantState? = nil, userId: String? = nil) { + public init(attributes: [String: String]? = nil, browserName: String? = nil, browserVersion: String? = nil, firstJoinTime: Date? = nil, ispName: String? = nil, osName: String? 
= nil, osVersion: String? = nil, participantId: String? = nil, published: Bool? = nil, sdkVersion: String? = nil, state: ParticipantState? = nil, userId: String? = nil) { self.attributes = attributes + self.browserName = browserName + self.browserVersion = browserVersion self.firstJoinTime = firstJoinTime + self.ispName = ispName + self.osName = osName + self.osVersion = osVersion self.participantId = participantId self.published = published + self.sdkVersion = sdkVersion self.state = state self.userId = userId } private enum CodingKeys: String, CodingKey { case attributes = "attributes" + case browserName = "browserName" + case browserVersion = "browserVersion" case firstJoinTime = "firstJoinTime" + case ispName = "ispName" + case osName = "osName" + case osVersion = "osVersion" case participantId = "participantId" case published = "published" + case sdkVersion = "sdkVersion" case state = "state" case userId = "userId" } diff --git a/Sources/Soto/Services/Inspector2/Inspector2_api+async.swift b/Sources/Soto/Services/Inspector2/Inspector2_api+async.swift index f4b418b80d..e8e09b9261 100644 --- a/Sources/Soto/Services/Inspector2/Inspector2_api+async.swift +++ b/Sources/Soto/Services/Inspector2/Inspector2_api+async.swift @@ -66,7 +66,7 @@ extension Inspector2 { return try await self.client.execute(operation: "CancelSbomExport", path: "/sbomexport/cancel", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a filter resource using specified filter criteria. + /// Creates a filter resource using specified filter criteria. When the filter action is set to SUPPRESS this action creates a suppression rule. public func createFilter(_ input: CreateFilterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateFilterResponse { return try await self.client.execute(operation: "CreateFilter", path: "/filters/create", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/Inspector2/Inspector2_api.swift b/Sources/Soto/Services/Inspector2/Inspector2_api.swift index 88f7470dbd..9fd803c923 100644 --- a/Sources/Soto/Services/Inspector2/Inspector2_api.swift +++ b/Sources/Soto/Services/Inspector2/Inspector2_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS Inspector2 service. /// -/// Amazon Inspector is a vulnerability discovery service that automates continuous scanning for security vulnerabilities within your Amazon EC2 and Amazon ECR environments. +/// Amazon Inspector is a vulnerability discovery service that automates continuous scanning for security vulnerabilities within your Amazon EC2, Amazon ECR, and Amazon Web Services Lambda environments. public struct Inspector2: AWSService { // MARK: Member variables @@ -118,7 +118,7 @@ public struct Inspector2: AWSService { return self.client.execute(operation: "CancelSbomExport", path: "/sbomexport/cancel", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a filter resource using specified filter criteria. + /// Creates a filter resource using specified filter criteria. When the filter action is set to SUPPRESS this action creates a suppression rule. public func createFilter(_ input: CreateFilterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { return self.client.execute(operation: "CreateFilter", path: "/filters/create", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift b/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift index 5fdf460c05..a4166c4a85 100644 --- a/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift +++ b/Sources/Soto/Services/Inspector2/Inspector2_shapes.swift @@ -139,6 +139,7 @@ extension Inspector2 { public enum Ec2Platform: String, CustomStringConvertible, Codable, Sendable { case linux = "LINUX" + case macos = "MACOS" case unknown = "UNKNOWN" case windows = "WINDOWS" public var description: String { return self.rawValue } @@ -1695,7 +1696,7 @@ extension Inspector2 { public let resourceId: [CoverageStringFilter]? /// An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE, AWS_LAMBDA_FUNCTION or AWS_ECR_REPOSITORY. public let resourceType: [CoverageStringFilter]? - /// The scan status code to filter on. + /// The scan status code to filter on. Valid values are: ValidationException, InternalServerException, ResourceNotFoundException, BadRequestException, and ThrottlingException. public let scanStatusCode: [CoverageStringFilter]? /// The scan status reason to filter on. public let scanStatusReason: [CoverageStringFilter]? @@ -3218,15 +3219,15 @@ extension Inspector2 { public let packageVulnerabilityDetails: PackageVulnerabilityDetails? /// An object that contains the details about how to remediate a finding. public let remediation: Remediation - /// Contains information on the resources involved in a finding. + /// Contains information on the resources involved in a finding. The resource value determines the valid values for type in your request. For more information, see Finding types in the Amazon Inspector user guide. public let resources: [Resource] - /// The severity of the finding. + /// The severity of the finding. UNTRIAGED applies to PACKAGE_VULNERABILITY type findings that the vendor has not assigned a severity yet. For more information, see Severity levels for findings in the Amazon Inspector user guide. public let severity: Severity /// The status of the finding. public let status: FindingStatus /// The title of the finding. public let title: String? - /// The type of the finding. + /// The type of the finding. The type value determines the valid values for resource in your request. For more information, see Finding types in the Amazon Inspector user guide. public let type: FindingType /// The date and time the finding was last updated at. public let updatedAt: Date? @@ -3986,9 +3987,9 @@ extension Inspector2 { } public struct ListAccountPermissionsRequest: AWSEncodableShape { - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. 
Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? /// The service scan type to check permissions for. public let service: Service? @@ -4032,9 +4033,9 @@ extension Inspector2 { public struct ListCoverageRequest: AWSEncodableShape { /// An object that contains details on the filters to apply to the coverage data for your environment. public let filterCriteria: CoverageFilterCriteria? - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? public init(filterCriteria: CoverageFilterCriteria? = nil, maxResults: Int? = nil, nextToken: String? = nil) { @@ -4122,9 +4123,9 @@ extension Inspector2 { } public struct ListDelegatedAdminAccountsRequest: AWSEncodableShape { - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? public init(maxResults: Int? = nil, nextToken: String? = nil) { @@ -4166,9 +4167,9 @@ extension Inspector2 { public let action: FilterAction? /// The Amazon resource number (ARN) of the filter. public let arns: [String]? - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? 
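The reworded maxResults/nextToken comments above describe standard token pagination. A minimal sketch of following nextToken by hand with ListAccountPermissions, assuming an already-configured Inspector2 service object (the async operation and the response's nextToken field follow Soto's usual code generation; only the request fields appear in this diff):

    import SotoInspector2

    // Walks every page of account permissions, counting pages until nextToken comes back nil.
    func countAccountPermissionPages(_ inspector2: Inspector2) async throws -> Int {
        var pages = 0
        var nextToken: String?
        repeat {
            let response = try await inspector2.listAccountPermissions(
                .init(maxResults: 100, nextToken: nextToken)
            )
            pages += 1
            nextToken = response.nextToken
        } while nextToken != nil
        return pages
    }

Soto typically also generates paginator helpers for these list operations, which run this token loop for you.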
- /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? public init(action: FilterAction? = nil, arns: [String]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { @@ -4220,9 +4221,9 @@ extension Inspector2 { public let aggregationRequest: AggregationRequest? /// The type of the aggregation request. public let aggregationType: AggregationType - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? public init(accountIds: [StringFilter]? = nil, aggregationRequest: AggregationRequest? = nil, aggregationType: AggregationType, maxResults: Int? = nil, nextToken: String? = nil) { @@ -4278,9 +4279,9 @@ extension Inspector2 { public struct ListFindingsRequest: AWSEncodableShape { /// Details on the filters to apply to your finding results. public let filterCriteria: FilterCriteria? - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? /// Details on the sort criteria to apply to your finding results. 
public let sortCriteria: SortCriteria? @@ -4325,9 +4326,9 @@ extension Inspector2 { } public struct ListMembersRequest: AWSEncodableShape { - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? /// Specifies whether to list only currently associated members if True or to list all members within the organization if False. public let onlyAssociated: Bool? @@ -4404,9 +4405,9 @@ extension Inspector2 { public struct ListUsageTotalsRequest: AWSEncodableShape { /// The Amazon Web Services account IDs to retrieve usage totals for. public let accountIds: [String]? - /// The maximum number of results to return in the response. + /// The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results. public let maxResults: Int? - /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page. + /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? public init(accountIds: [String]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { @@ -5164,7 +5165,7 @@ extension Inspector2 { } public struct ScanStatus: AWSDecodableShape { - /// The reason for the scan. + /// The scan status. Possible return values and descriptions are: PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon. ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy. INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user. UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. 
Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance. UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html. SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image. RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up. SUCCESSFUL - The scan was successful. NO_RESOURCES_FOUND - Reserved for future use. IMAGE_SIZE_EXCEEDED - Reserved for future use. SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration. SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not receive new findings because of the scan frequency configuration. EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it’s in a stopped state. PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status. NO INVENTORY - Amazon Inspector couldn’t find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance’s inventory in the SSM Fleet Manager console. STALE_INVENTORY - Amazon Inspector wasn’t able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console. EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag. UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html. UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type. UNSUPPORTED_CONFIG_FILE - Reserved for future use. DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account. DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance. DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeded the maximum threshold of 15 minutes. DEEP_INSPECTION_NO_INVENTORY - The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance.
This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance. public let reason: ScanStatusReason /// The status code of the scan. public let statusCode: ScanStatusCode @@ -5772,7 +5773,7 @@ extension Inspector2 { public let referenceUrls: [String]? /// A list of related vulnerabilities. public let relatedVulnerabilities: [String]? - /// The source of the vulnerability information. + /// The source of the vulnerability information. Possible results are RHEL, AMAZON_CVE, DEBIAN or NVD. public let source: VulnerabilitySource? /// A link to the official source material for this vulnerability. public let sourceUrl: String? diff --git a/Sources/Soto/Services/Lambda/Lambda_shapes.swift b/Sources/Soto/Services/Lambda/Lambda_shapes.swift index 308897add1..7adb6e65a0 100644 --- a/Sources/Soto/Services/Lambda/Lambda_shapes.swift +++ b/Sources/Soto/Services/Lambda/Lambda_shapes.swift @@ -4916,12 +4916,15 @@ extension Lambda { } public struct VpcConfig: AWSEncodableShape { + /// Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. + public let ipv6AllowedForDualStack: Bool? /// A list of VPC security group IDs. public let securityGroupIds: [String]? /// A list of VPC subnet IDs. public let subnetIds: [String]? - public init(securityGroupIds: [String]? = nil, subnetIds: [String]? = nil) { + public init(ipv6AllowedForDualStack: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil) { + self.ipv6AllowedForDualStack = ipv6AllowedForDualStack self.securityGroupIds = securityGroupIds self.subnetIds = subnetIds } @@ -4932,12 +4935,15 @@ extension Lambda { } private enum CodingKeys: String, CodingKey { + case ipv6AllowedForDualStack = "Ipv6AllowedForDualStack" case securityGroupIds = "SecurityGroupIds" case subnetIds = "SubnetIds" } } public struct VpcConfigResponse: AWSDecodableShape { + /// Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. + public let ipv6AllowedForDualStack: Bool? /// A list of VPC security group IDs. public let securityGroupIds: [String]? /// A list of VPC subnet IDs. @@ -4945,13 +4951,15 @@ extension Lambda { /// The ID of the VPC. public let vpcId: String? - public init(securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) { + public init(ipv6AllowedForDualStack: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) { + self.ipv6AllowedForDualStack = ipv6AllowedForDualStack self.securityGroupIds = securityGroupIds self.subnetIds = subnetIds self.vpcId = vpcId } private enum CodingKeys: String, CodingKey { + case ipv6AllowedForDualStack = "Ipv6AllowedForDualStack" case securityGroupIds = "SecurityGroupIds" case subnetIds = "SubnetIds" case vpcId = "VpcId" diff --git a/Sources/Soto/Services/Location/Location_api+async.swift b/Sources/Soto/Services/Location/Location_api+async.swift index 308bff1d0d..e26215b276 100644 --- a/Sources/Soto/Services/Location/Location_api+async.swift +++ b/Sources/Soto/Services/Location/Location_api+async.swift @@ -23,7 +23,7 @@ extension Location { /// Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection. You can associate up to five geofence collections to each tracker resource. 
Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account. public func associateTrackerConsumer(_ input: AssociateTrackerConsumerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AssociateTrackerConsumerResponse { - return try await self.client.execute(operation: "AssociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "AssociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Deletes the position history of one or more devices from a tracker resource. @@ -68,97 +68,97 @@ extension Location { /// Creates a geofence collection, which manages and stores geofences. public func createGeofenceCollection(_ input: CreateGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateGeofenceCollectionResponse { - return try await self.client.execute(operation: "CreateGeofenceCollection", path: "/geofencing/v0/collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreateGeofenceCollection", path: "/geofencing/v0/collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Creates an API key resource in your Amazon Web Services account, which lets you grant actions for Amazon Location resources to the API key bearer. For more information, see Using API keys. public func createKey(_ input: CreateKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateKeyResponse { - return try await self.client.execute(operation: "CreateKey", path: "/metadata/v0/keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreateKey", path: "/metadata/v0/keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Creates a map resource in your Amazon Web Services account, which provides map tiles of different styles sourced from global location data providers. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createMap(_ input: CreateMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> CreateMapResponse { - return try await self.client.execute(operation: "CreateMap", path: "/maps/v0/maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreateMap", path: "/maps/v0/maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Creates a place index resource in your Amazon Web Services account. Use a place index resource to geocode addresses and other text queries by using the SearchPlaceIndexForText operation, and reverse geocode coordinates by using the SearchPlaceIndexForPosition operation, and enable autosuggestions by using the SearchPlaceIndexForSuggestions operation. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createPlaceIndex(_ input: CreatePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreatePlaceIndexResponse { - return try await self.client.execute(operation: "CreatePlaceIndex", path: "/places/v0/indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreatePlaceIndex", path: "/places/v0/indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Creates a route calculator resource in your Amazon Web Services account. You can send requests to a route calculator resource to estimate travel time, distance, and get directions. A route calculator sources traffic and road network data from your chosen data provider. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createRouteCalculator(_ input: CreateRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateRouteCalculatorResponse { - return try await self.client.execute(operation: "CreateRouteCalculator", path: "/routes/v0/calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreateRouteCalculator", path: "/routes/v0/calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and historical location of devices. public func createTracker(_ input: CreateTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> CreateTrackerResponse { - return try await self.client.execute(operation: "CreateTracker", path: "/tracking/v0/trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "CreateTracker", path: "/tracking/v0/trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Deletes a geofence collection from your Amazon Web Services account. This operation deletes the resource permanently. If the geofence collection is the target of a tracker resource, the devices will no longer be monitored. public func deleteGeofenceCollection(_ input: DeleteGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteGeofenceCollectionResponse { - return try await self.client.execute(operation: "DeleteGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeleteGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Deletes the specified API key. The API key must have been deactivated more than 90 days previously. public func deleteKey(_ input: DeleteKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteKeyResponse { - return try await self.client.execute(operation: "DeleteKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeleteKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Deletes a map resource from your Amazon Web Services account. This operation deletes the resource permanently. If the map is being used in an application, the map may not render. public func deleteMap(_ input: DeleteMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteMapResponse { - return try await self.client.execute(operation: "DeleteMap", path: "/maps/v0/maps/{MapName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeleteMap", path: "/maps/v0/maps/{MapName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Deletes a place index resource from your Amazon Web Services account. This operation deletes the resource permanently. public func deletePlaceIndex(_ input: DeletePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> DeletePlaceIndexResponse { - return try await self.client.execute(operation: "DeletePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeletePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Deletes a route calculator resource from your Amazon Web Services account. This operation deletes the resource permanently. public func deleteRouteCalculator(_ input: DeleteRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteRouteCalculatorResponse { - return try await self.client.execute(operation: "DeleteRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeleteRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Deletes a tracker resource from your Amazon Web Services account. This operation deletes the resource permanently. If the tracker resource is in use, you may encounter an error. Make sure that the target resource isn't a dependency for your applications. public func deleteTracker(_ input: DeleteTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteTrackerResponse { - return try await self.client.execute(operation: "DeleteTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DeleteTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Retrieves the geofence collection details. public func describeGeofenceCollection(_ input: DescribeGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeGeofenceCollectionResponse { - return try await self.client.execute(operation: "DescribeGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribeGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Retrieves the API key resource details. public func describeKey(_ input: DescribeKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> DescribeKeyResponse { - return try await self.client.execute(operation: "DescribeKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribeKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Retrieves the map resource details. public func describeMap(_ input: DescribeMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeMapResponse { - return try await self.client.execute(operation: "DescribeMap", path: "/maps/v0/maps/{MapName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribeMap", path: "/maps/v0/maps/{MapName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Retrieves the place index resource details. public func describePlaceIndex(_ input: DescribePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribePlaceIndexResponse { - return try await self.client.execute(operation: "DescribePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Retrieves the route calculator resource details. public func describeRouteCalculator(_ input: DescribeRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeRouteCalculatorResponse { - return try await self.client.execute(operation: "DescribeRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribeRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Retrieves the tracker resource details. public func describeTracker(_ input: DescribeTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeTrackerResponse { - return try await self.client.execute(operation: "DescribeTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DescribeTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Removes the association between a tracker resource and a geofence collection. Once you unlink a tracker resource from a geofence collection, the tracker positions will no longer be automatically evaluated against geofences. 
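// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated diff: linking and then unlinking
// a geofence collection consumer through the async API in this file. The client
// setup, region, resource names, and ARN are placeholder assumptions; the
// hostPrefix change in this file means these control-plane calls now resolve to
// cp.tracking.<region> endpoints, with no change required at the call site.
// ---------------------------------------------------------------------------
import SotoLocation

func relinkTrackerConsumer() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let location = Location(client: client, region: .useast1)

    let collectionArn = "arn:aws:geo:us-east-1:111122223333:geofence-collection/store-perimeters"

    // Associate the geofence collection with the tracker (up to five per tracker).
    _ = try await location.associateTrackerConsumer(
        .init(consumerArn: collectionArn, trackerName: "delivery-fleet")
    )

    // Later, remove the association so positions are no longer evaluated against it.
    _ = try await location.disassociateTrackerConsumer(
        .init(consumerArn: collectionArn, trackerName: "delivery-fleet")
    )
}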
public func disassociateTrackerConsumer(_ input: DisassociateTrackerConsumerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DisassociateTrackerConsumerResponse { - return try await self.client.execute(operation: "DisassociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "DisassociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Retrieves a device's most recent position according to its sample time. Device positions are deleted after 30 days. @@ -208,7 +208,7 @@ extension Location { /// Lists geofence collections in your Amazon Web Services account. public func listGeofenceCollections(_ input: ListGeofenceCollectionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListGeofenceCollectionsResponse { - return try await self.client.execute(operation: "ListGeofenceCollections", path: "/geofencing/v0/list-collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListGeofenceCollections", path: "/geofencing/v0/list-collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Lists geofences stored in a given geofence collection. @@ -218,37 +218,37 @@ extension Location { /// Lists API key resources in your Amazon Web Services account. public func listKeys(_ input: ListKeysRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListKeysResponse { - return try await self.client.execute(operation: "ListKeys", path: "/metadata/v0/list-keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListKeys", path: "/metadata/v0/list-keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Lists map resources in your Amazon Web Services account. public func listMaps(_ input: ListMapsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListMapsResponse { - return try await self.client.execute(operation: "ListMaps", path: "/maps/v0/list-maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListMaps", path: "/maps/v0/list-maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Lists place index resources in your Amazon Web Services account. public func listPlaceIndexes(_ input: ListPlaceIndexesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListPlaceIndexesResponse { - return try await self.client.execute(operation: "ListPlaceIndexes", path: "/places/v0/list-indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListPlaceIndexes", path: "/places/v0/list-indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Lists route calculator resources in your Amazon Web Services account. public func listRouteCalculators(_ input: ListRouteCalculatorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListRouteCalculatorsResponse { - return try await self.client.execute(operation: "ListRouteCalculators", path: "/routes/v0/list-calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListRouteCalculators", path: "/routes/v0/list-calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Returns a list of tags that are applied to the specified Amazon Location resource. public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListTagsForResourceResponse { - return try await self.client.execute(operation: "ListTagsForResource", path: "/tags/{ResourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListTagsForResource", path: "/tags/{ResourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Lists geofence collections currently associated to the given tracker resource. public func listTrackerConsumers(_ input: ListTrackerConsumersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListTrackerConsumersResponse { - return try await self.client.execute(operation: "ListTrackerConsumers", path: "/tracking/v0/trackers/{TrackerName}/list-consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListTrackerConsumers", path: "/tracking/v0/trackers/{TrackerName}/list-consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Lists tracker resources in your Amazon Web Services account. public func listTrackers(_ input: ListTrackersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListTrackersResponse { - return try await self.client.execute(operation: "ListTrackers", path: "/tracking/v0/list-trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "ListTrackers", path: "/tracking/v0/list-trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Stores a geofence geometry in a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request. 
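// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated diff: driving one of the list
// operations in the hunk above by hand, using the maxResults/nextToken convention
// described earlier in this change set. Field names are assumptions based on the
// usual generated shapes; Soto also emits paginator helpers for these operations.
// ---------------------------------------------------------------------------
import SotoLocation

func countTrackers(using location: Location) async throws -> Int {
    var total = 0
    var nextToken: String? = nil
    repeat {
        // Each page returns at most maxResults entries plus an optional nextToken.
        let page = try await location.listTrackers(.init(maxResults: 50, nextToken: nextToken))
        total += page.entries.count
        nextToken = page.nextToken
    } while nextToken != nil
    return total
}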
@@ -273,42 +273,42 @@ extension Location { /// Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with an Amazon Location Service resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the tags already associated with the resource. If you specify a tag key that's already associated with the resource, the new tag value that you specify replaces the previous value for that tag. You can associate up to 50 tags with a resource. public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> TagResourceResponse { - return try await self.client.execute(operation: "TagResource", path: "/tags/{ResourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "TagResource", path: "/tags/{ResourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Removes one or more tags from the specified Amazon Location resource. public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UntagResourceResponse { - return try await self.client.execute(operation: "UntagResource", path: "/tags/{ResourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UntagResource", path: "/tags/{ResourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given geofence collection. public func updateGeofenceCollection(_ input: UpdateGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateGeofenceCollectionResponse { - return try await self.client.execute(operation: "UpdateGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdateGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given API key resource. public func updateKey(_ input: UpdateKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateKeyResponse { - return try await self.client.execute(operation: "UpdateKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdateKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given map resource. 
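// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated diff: applying tags with the
// TagResource operation from the hunk above (up to 50 tags per resource). The
// ARN, tag keys, and field spellings are placeholder assumptions.
// ---------------------------------------------------------------------------
import SotoLocation

func tagLocationResource(_ location: Location, resourceArn: String) async throws {
    _ = try await location.tagResource(.init(
        resourceArn: resourceArn,
        tags: ["team": "logistics", "stage": "prod"]
    ))
}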
public func updateMap(_ input: UpdateMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateMapResponse { - return try await self.client.execute(operation: "UpdateMap", path: "/maps/v0/maps/{MapName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdateMap", path: "/maps/v0/maps/{MapName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given place index resource. public func updatePlaceIndex(_ input: UpdatePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdatePlaceIndexResponse { - return try await self.client.execute(operation: "UpdatePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdatePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Updates the specified properties for a given route calculator resource. public func updateRouteCalculator(_ input: UpdateRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateRouteCalculatorResponse { - return try await self.client.execute(operation: "UpdateRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdateRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given tracker resource. public func updateTracker(_ input: UpdateTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateTrackerResponse { - return try await self.client.execute(operation: "UpdateTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return try await self.client.execute(operation: "UpdateTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } } diff --git a/Sources/Soto/Services/Location/Location_api.swift b/Sources/Soto/Services/Location/Location_api.swift index 643f98a945..b4b1fde523 100644 --- a/Sources/Soto/Services/Location/Location_api.swift +++ b/Sources/Soto/Services/Location/Location_api.swift @@ -70,7 +70,7 @@ public struct Location: AWSService { /// Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection. You can associate up to five geofence collections to each tracker resource. Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account. 
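// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated diff: the same
// AssociateTrackerConsumer call through the EventLoopFuture-based API defined in
// Location_api.swift (the async methods in the previous file wrap these). In the
// generated source the return type is EventLoopFuture<AssociateTrackerConsumerResponse>;
// the ARN and tracker name below are placeholders.
// ---------------------------------------------------------------------------
import SotoLocation

func linkTrackerToCollection(_ location: Location) -> EventLoopFuture<Void> {
    let request = Location.AssociateTrackerConsumerRequest(
        consumerArn: "arn:aws:geo:us-east-1:111122223333:geofence-collection/store-perimeters",
        trackerName: "delivery-fleet"
    )
    // Control-plane call: with this change it resolves to a cp.tracking. host.
    return location.associateTrackerConsumer(request).map { _ in }
}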
public func associateTrackerConsumer(_ input: AssociateTrackerConsumerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "AssociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "AssociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Deletes the position history of one or more devices from a tracker resource. @@ -115,97 +115,97 @@ public struct Location: AWSService { /// Creates a geofence collection, which manages and stores geofences. public func createGeofenceCollection(_ input: CreateGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreateGeofenceCollection", path: "/geofencing/v0/collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreateGeofenceCollection", path: "/geofencing/v0/collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Creates an API key resource in your Amazon Web Services account, which lets you grant actions for Amazon Location resources to the API key bearer. For more information, see Using API keys. public func createKey(_ input: CreateKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreateKey", path: "/metadata/v0/keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreateKey", path: "/metadata/v0/keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Creates a map resource in your Amazon Web Services account, which provides map tiles of different styles sourced from global location data providers. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createMap(_ input: CreateMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreateMap", path: "/maps/v0/maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreateMap", path: "/maps/v0/maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Creates a place index resource in your Amazon Web Services account. Use a place index resource to geocode addresses and other text queries by using the SearchPlaceIndexForText operation, and reverse geocode coordinates by using the SearchPlaceIndexForPosition operation, and enable autosuggestions by using the SearchPlaceIndexForSuggestions operation. 
If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createPlaceIndex(_ input: CreatePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreatePlaceIndex", path: "/places/v0/indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreatePlaceIndex", path: "/places/v0/indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Creates a route calculator resource in your Amazon Web Services account. You can send requests to a route calculator resource to estimate travel time, distance, and get directions. A route calculator sources traffic and road network data from your chosen data provider. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details. public func createRouteCalculator(_ input: CreateRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreateRouteCalculator", path: "/routes/v0/calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreateRouteCalculator", path: "/routes/v0/calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and historical location of devices. public func createTracker(_ input: CreateTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "CreateTracker", path: "/tracking/v0/trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "CreateTracker", path: "/tracking/v0/trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Deletes a geofence collection from your Amazon Web Services account. This operation deletes the resource permanently. If the geofence collection is the target of a tracker resource, the devices will no longer be monitored. public func deleteGeofenceCollection(_ input: DeleteGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "DeleteGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeleteGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Deletes the specified API key. The API key must have been deactivated more than 90 days previously. public func deleteKey(_ input: DeleteKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DeleteKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeleteKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Deletes a map resource from your Amazon Web Services account. This operation deletes the resource permanently. If the map is being used in an application, the map may not render. public func deleteMap(_ input: DeleteMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DeleteMap", path: "/maps/v0/maps/{MapName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeleteMap", path: "/maps/v0/maps/{MapName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Deletes a place index resource from your Amazon Web Services account. This operation deletes the resource permanently. public func deletePlaceIndex(_ input: DeletePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DeletePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeletePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Deletes a route calculator resource from your Amazon Web Services account. This operation deletes the resource permanently. public func deleteRouteCalculator(_ input: DeleteRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DeleteRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeleteRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Deletes a tracker resource from your Amazon Web Services account. This operation deletes the resource permanently. 
If the tracker resource is in use, you may encounter an error. Make sure that the target resource isn't a dependency for your applications. public func deleteTracker(_ input: DeleteTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DeleteTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DeleteTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Retrieves the geofence collection details. public func describeGeofenceCollection(_ input: DescribeGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribeGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribeGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Retrieves the API key resource details. public func describeKey(_ input: DescribeKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribeKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribeKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Retrieves the map resource details. public func describeMap(_ input: DescribeMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribeMap", path: "/maps/v0/maps/{MapName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribeMap", path: "/maps/v0/maps/{MapName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Retrieves the place index resource details. public func describePlaceIndex(_ input: DescribePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Retrieves the route calculator resource details. public func describeRouteCalculator(_ input: DescribeRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribeRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribeRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Retrieves the tracker resource details. public func describeTracker(_ input: DescribeTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DescribeTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DescribeTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Removes the association between a tracker resource and a geofence collection. Once you unlink a tracker resource from a geofence collection, the tracker positions will no longer be automatically evaluated against geofences. public func disassociateTrackerConsumer(_ input: DisassociateTrackerConsumerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "DisassociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "DisassociateTrackerConsumer", path: "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Retrieves a device's most recent position according to its sample time. Device positions are deleted after 30 days. @@ -255,7 +255,7 @@ public struct Location: AWSService { /// Lists geofence collections in your Amazon Web Services account. public func listGeofenceCollections(_ input: ListGeofenceCollectionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListGeofenceCollections", path: "/geofencing/v0/list-collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListGeofenceCollections", path: "/geofencing/v0/list-collections", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Lists geofences stored in a given geofence collection. @@ -265,37 +265,37 @@ public struct Location: AWSService { /// Lists API key resources in your Amazon Web Services account. public func listKeys(_ input: ListKeysRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "ListKeys", path: "/metadata/v0/list-keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListKeys", path: "/metadata/v0/list-keys", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Lists map resources in your Amazon Web Services account. public func listMaps(_ input: ListMapsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListMaps", path: "/maps/v0/list-maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListMaps", path: "/maps/v0/list-maps", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Lists place index resources in your Amazon Web Services account. public func listPlaceIndexes(_ input: ListPlaceIndexesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListPlaceIndexes", path: "/places/v0/list-indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListPlaceIndexes", path: "/places/v0/list-indexes", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Lists route calculator resources in your Amazon Web Services account. public func listRouteCalculators(_ input: ListRouteCalculatorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListRouteCalculators", path: "/routes/v0/list-calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListRouteCalculators", path: "/routes/v0/list-calculators", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Returns a list of tags that are applied to the specified Amazon Location resource. public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListTagsForResource", path: "/tags/{ResourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListTagsForResource", path: "/tags/{ResourceArn}", httpMethod: .GET, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Lists geofence collections currently associated to the given tracker resource. public func listTrackerConsumers(_ input: ListTrackerConsumersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "ListTrackerConsumers", path: "/tracking/v0/trackers/{TrackerName}/list-consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListTrackerConsumers", path: "/tracking/v0/trackers/{TrackerName}/list-consumers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Lists tracker resources in your Amazon Web Services account. public func listTrackers(_ input: ListTrackersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "ListTrackers", path: "/tracking/v0/list-trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "ListTrackers", path: "/tracking/v0/list-trackers", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } /// Stores a geofence geometry in a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request. @@ -320,42 +320,42 @@ public struct Location: AWSService { /// Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with an Amazon Location Service resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the tags already associated with the resource. If you specify a tag key that's already associated with the resource, the new tag value that you specify replaces the previous value for that tag. You can associate up to 50 tags with a resource. public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "TagResource", path: "/tags/{ResourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "TagResource", path: "/tags/{ResourceArn}", httpMethod: .POST, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Removes one or more tags from the specified Amazon Location resource. public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "UntagResource", path: "/tags/{ResourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UntagResource", path: "/tags/{ResourceArn}", httpMethod: .DELETE, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given geofence collection. public func updateGeofenceCollection(_ input: UpdateGeofenceCollectionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdateGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "geofencing.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdateGeofenceCollection", path: "/geofencing/v0/collections/{CollectionName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.geofencing.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given API key resource. public func updateKey(_ input: UpdateKeyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdateKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "metadata.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdateKey", path: "/metadata/v0/keys/{KeyName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.metadata.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given map resource. public func updateMap(_ input: UpdateMapRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdateMap", path: "/maps/v0/maps/{MapName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "maps.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdateMap", path: "/maps/v0/maps/{MapName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.maps.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given place index resource. public func updatePlaceIndex(_ input: UpdatePlaceIndexRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdatePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "places.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdatePlaceIndex", path: "/places/v0/indexes/{IndexName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.places.", logger: logger, on: eventLoop) } /// Updates the specified properties for a given route calculator resource. public func updateRouteCalculator(_ input: UpdateRouteCalculatorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdateRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "routes.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdateRouteCalculator", path: "/routes/v0/calculators/{CalculatorName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.routes.", logger: logger, on: eventLoop) } /// Updates the specified properties of a given tracker resource. public func updateTracker(_ input: UpdateTrackerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { - return self.client.execute(operation: "UpdateTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "tracking.", logger: logger, on: eventLoop) + return self.client.execute(operation: "UpdateTracker", path: "/tracking/v0/trackers/{TrackerName}", httpMethod: .PATCH, serviceConfig: self.config, input: input, hostPrefix: "cp.tracking.", logger: logger, on: eventLoop) } } diff --git a/Sources/Soto/Services/M2/M2_api.swift b/Sources/Soto/Services/M2/M2_api.swift index 4ca220f368..0c0fbaa475 100644 --- a/Sources/Soto/Services/M2/M2_api.swift +++ b/Sources/Soto/Services/M2/M2_api.swift @@ -59,6 +59,8 @@ public struct M2: AWSService { "ca-central-1": "m2-fips.ca-central-1.amazonaws.com", "us-east-1": "m2-fips.us-east-1.amazonaws.com", "us-east-2": "m2-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "m2-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "m2-fips.us-gov-west-1.amazonaws.com", "us-west-1": "m2-fips.us-west-1.amazonaws.com", "us-west-2": "m2-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api+async.swift b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api+async.swift index d1a1f69d4a..073bf2a064 100644 --- a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api+async.swift +++ b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api+async.swift @@ -26,7 +26,7 @@ extension MarketplaceCatalog { return try await self.client.execute(operation: "CancelChangeSet", path: "/CancelChangeSet", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes a resource-based policy on an Entity that is identified by its resource ARN. + /// Deletes a resource-based policy on an entity that is identified by its resource ARN. public func deleteResourcePolicy(_ input: DeleteResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteResourcePolicyResponse { return try await self.client.execute(operation: "DeleteResourcePolicy", path: "/DeleteResourcePolicy", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -41,7 +41,7 @@ extension MarketplaceCatalog { return try await self.client.execute(operation: "DescribeEntity", path: "/DescribeEntity", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a resource-based policy of an Entity that is identified by its resource ARN. + /// Gets a resource-based policy of an entity that is identified by its resource ARN. public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetResourcePolicyResponse { return try await self.client.execute(operation: "GetResourcePolicy", path: "/GetResourcePolicy", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -61,12 +61,12 @@ extension MarketplaceCatalog { return try await self.client.execute(operation: "ListTagsForResource", path: "/ListTagsForResource", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Attaches a resource-based policy to an Entity. Examples of an entity include: AmiProduct and ContainerProduct. + /// Attaches a resource-based policy to an entity. Examples of an entity include: AmiProduct and ContainerProduct. 
public func putResourcePolicy(_ input: PutResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> PutResourcePolicyResponse { return try await self.client.execute(operation: "PutResourcePolicy", path: "/PutResourcePolicy", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information on change types for single-AMI products, see Working with single-AMI products. Als, for more information on change types available for container-based products, see Working with container products. + /// Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products. public func startChangeSet(_ input: StartChangeSetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartChangeSetResponse { return try await self.client.execute(operation: "StartChangeSet", path: "/StartChangeSet", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api.swift b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api.swift index 9e2c711e28..c469cc6122 100644 --- a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api.swift +++ b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_api.swift @@ -69,7 +69,7 @@ public struct MarketplaceCatalog: AWSService { return self.client.execute(operation: "CancelChangeSet", path: "/CancelChangeSet", httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes a resource-based policy on an Entity that is identified by its resource ARN. + /// Deletes a resource-based policy on an entity that is identified by its resource ARN. 
public func deleteResourcePolicy(_ input: DeleteResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteResourcePolicy", path: "/DeleteResourcePolicy", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -84,7 +84,7 @@ public struct MarketplaceCatalog: AWSService { return self.client.execute(operation: "DescribeEntity", path: "/DescribeEntity", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a resource-based policy of an Entity that is identified by its resource ARN. + /// Gets a resource-based policy of an entity that is identified by its resource ARN. public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "GetResourcePolicy", path: "/GetResourcePolicy", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -104,12 +104,12 @@ public struct MarketplaceCatalog: AWSService { return self.client.execute(operation: "ListTagsForResource", path: "/ListTagsForResource", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Attaches a resource-based policy to an Entity. Examples of an entity include: AmiProduct and ContainerProduct. + /// Attaches a resource-based policy to an entity. Examples of an entity include: AmiProduct and ContainerProduct. public func putResourcePolicy(_ input: PutResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "PutResourcePolicy", path: "/PutResourcePolicy", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information on change types for single-AMI products, see Working with single-AMI products. Als, for more information on change types available for container-based products, see Working with container products. + /// Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. 
For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products. public func startChangeSet(_ input: StartChangeSetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "StartChangeSet", path: "/StartChangeSet", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_shapes.swift b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_shapes.swift index 65deedaf8e..c51b175500 100644 --- a/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_shapes.swift +++ b/Sources/Soto/Services/MarketplaceCatalog/MarketplaceCatalog_shapes.swift @@ -103,19 +103,22 @@ extension MarketplaceCatalog { public struct Change: AWSEncodableShape { /// Optional name for the change. public let changeName: String? - /// Change types are single string values that describe your intention for the change. Each change type is unique for each EntityType provided in the change's scope. For more information on change types available for single-AMI products, see Working with single-AMI products. Also, for more information on change types available for container-based products, see Working with container products. + /// Change types are single string values that describe your intention for the change. Each change type is unique for each EntityType provided in the change's scope. For more information on change types available for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products. public let changeType: String - /// This object contains details specific to the change type of the requested change. For more information on change types available for single-AMI products, see Working with single-AMI products. Also, for more information on change types available for container-based products, see Working with container products. - public let details: String + /// This object contains details specific to the change type of the requested change. For more information about change types available for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products. + public let details: String? + /// Alternative field that accepts a JSON value instead of a string for ChangeType details. You can use either Details or DetailsDocument, but not both. + public let detailsDocument: String? /// The entity to be changed. public let entity: Entity /// The tags associated with the change. public let entityTags: [Tag]? - public init(changeName: String? = nil, changeType: String, details: String, entity: Entity, entityTags: [Tag]? = nil) { + public init(changeName: String? = nil, changeType: String, details: String? = nil, detailsDocument: String? = nil, entity: Entity, entityTags: [Tag]? 
= nil) { self.changeName = changeName self.changeType = changeType self.details = details + self.detailsDocument = detailsDocument self.entity = entity self.entityTags = entityTags } @@ -142,6 +145,7 @@ extension MarketplaceCatalog { case changeName = "ChangeName" case changeType = "ChangeType" case details = "Details" + case detailsDocument = "DetailsDocument" case entity = "Entity" case entityTags = "EntityTags" } @@ -195,15 +199,18 @@ extension MarketplaceCatalog { public let changeType: String? /// This object contains details specific to the change type of the requested change. public let details: String? + /// The JSON value of the details specific to the change type of the requested change. + public let detailsDocument: String? /// The entity to be changed. public let entity: Entity? /// An array of ErrorDetail objects associated with the change. public let errorDetailList: [ErrorDetail]? - public init(changeName: String? = nil, changeType: String? = nil, details: String? = nil, entity: Entity? = nil, errorDetailList: [ErrorDetail]? = nil) { + public init(changeName: String? = nil, changeType: String? = nil, details: String? = nil, detailsDocument: String? = nil, entity: Entity? = nil, errorDetailList: [ErrorDetail]? = nil) { self.changeName = changeName self.changeType = changeType self.details = details + self.detailsDocument = detailsDocument self.entity = entity self.errorDetailList = errorDetailList } @@ -212,6 +219,7 @@ extension MarketplaceCatalog { case changeName = "ChangeName" case changeType = "ChangeType" case details = "Details" + case detailsDocument = "DetailsDocument" case entity = "Entity" case errorDetailList = "ErrorDetailList" } @@ -222,7 +230,7 @@ extension MarketplaceCatalog { AWSMemberEncoding(label: "resourceArn", location: .querystring("resourceArn")) ] - /// The Amazon Resource Name (ARN) of the Entity resource that is associated with the resource policy. + /// The Amazon Resource Name (ARN) of the entity resource that is associated with the resource policy. public let resourceArn: String public init(resourceArn: String) { @@ -346,6 +354,8 @@ extension MarketplaceCatalog { public struct DescribeEntityResponse: AWSDecodableShape { /// This stringified JSON object includes the details of the entity. public let details: String? + /// The JSON value of the details specific to the entity. + public let detailsDocument: String? /// The ARN associated to the unique identifier for the entity referenced in this request. public let entityArn: String? /// The identifier of the entity, in the format of EntityId@RevisionId. @@ -355,8 +365,9 @@ extension MarketplaceCatalog { /// The last modified date of the entity, in ISO 8601 format (2018-02-27T13:45:22Z). public let lastModifiedDate: String? - public init(details: String? = nil, entityArn: String? = nil, entityIdentifier: String? = nil, entityType: String? = nil, lastModifiedDate: String? = nil) { + public init(details: String? = nil, detailsDocument: String? = nil, entityArn: String? = nil, entityIdentifier: String? = nil, entityType: String? = nil, lastModifiedDate: String? 
= nil) { self.details = details + self.detailsDocument = detailsDocument self.entityArn = entityArn self.entityIdentifier = entityIdentifier self.entityType = entityType @@ -365,6 +376,7 @@ extension MarketplaceCatalog { private enum CodingKeys: String, CodingKey { case details = "Details" + case detailsDocument = "DetailsDocument" case entityArn = "EntityArn" case entityIdentifier = "EntityIdentifier" case entityType = "EntityType" @@ -483,7 +495,7 @@ extension MarketplaceCatalog { AWSMemberEncoding(label: "resourceArn", location: .querystring("resourceArn")) ] - /// The Amazon Resource Name (ARN) of the Entity resource that is associated with the resource policy. + /// The Amazon Resource Name (ARN) of the entity resource that is associated with the resource policy. public let resourceArn: String public init(resourceArn: String) { @@ -586,6 +598,7 @@ extension MarketplaceCatalog { public let maxResults: Int? /// The value of the next token, if it exists. Null if there are no more results. public let nextToken: String? + /// Filters the returned set of entities based on their owner. The default is SELF. To list entities shared with you through AWS Resource Access Manager (AWS RAM), set to SHARED. Entities shared through the AWS Marketplace Catalog API PutResourcePolicy operation can't be discovered through the SHARED parameter. public let ownershipType: OwnershipType? /// An object that contains two attributes, SortBy and SortOrder. public let sort: Sort? @@ -687,7 +700,7 @@ extension MarketplaceCatalog { public struct PutResourcePolicyRequest: AWSEncodableShape { /// The policy document to set; formatted in JSON. public let policy: String - /// The Amazon Resource Name (ARN) of the Entity resource you want to associate with a resource policy. + /// The Amazon Resource Name (ARN) of the entity resource you want to associate with a resource policy. public let resourceArn: String public init(policy: String, resourceArn: String) { @@ -715,7 +728,7 @@ extension MarketplaceCatalog { } public struct Sort: AWSEncodableShape { - /// For ListEntities, supported attributes include LastModifiedDate (default), Visibility, EntityId, and Name. For ListChangeSets, supported attributes include StartTime and EndTime. + /// For ListEntities, supported attributes include LastModifiedDate (default) and EntityId. In addition to LastModifiedDate and EntityId, each EntityType might support additional fields. For ListChangeSets, supported attributes include StartTime and EndTime. public let sortBy: String? /// The sorting order. Can be ASCENDING or DESCENDING. The default value is DESCENDING. public let sortOrder: SortOrder? diff --git a/Sources/Soto/Services/MediaTailor/MediaTailor_shapes.swift b/Sources/Soto/Services/MediaTailor/MediaTailor_shapes.swift index 6973473b4f..35d5b0c0be 100644 --- a/Sources/Soto/Services/MediaTailor/MediaTailor_shapes.swift +++ b/Sources/Soto/Services/MediaTailor/MediaTailor_shapes.swift @@ -169,6 +169,19 @@ extension MediaTailor { } } + public struct AdBreakOpportunity: AWSDecodableShape { + /// The offset in milliseconds from the start of the VOD source at which an ad marker was detected. + public let offsetMillis: Int64 + + public init(offsetMillis: Int64) { + self.offsetMillis = offsetMillis + } + + private enum CodingKeys: String, CodingKey { + case offsetMillis = "OffsetMillis" + } + } + public struct AdMarkerPassthrough: AWSEncodableShape & AWSDecodableShape { /// Enables ad marker passthrough for your configuration. public let enabled: Bool? 
@@ -1416,6 +1429,8 @@ extension MediaTailor { } public struct DescribeVodSourceResponse: AWSDecodableShape { + /// The ad break opportunities within the VOD source. + public let adBreakOpportunities: [AdBreakOpportunity]? /// The ARN of the VOD source. public let arn: String? /// The timestamp that indicates when the VOD source was created. @@ -1433,7 +1448,8 @@ extension MediaTailor { /// The name of the VOD source. public let vodSourceName: String? - public init(arn: String? = nil, creationTime: Date? = nil, httpPackageConfigurations: [HttpPackageConfiguration]? = nil, lastModifiedTime: Date? = nil, sourceLocationName: String? = nil, tags: [String: String]? = nil, vodSourceName: String? = nil) { + public init(adBreakOpportunities: [AdBreakOpportunity]? = nil, arn: String? = nil, creationTime: Date? = nil, httpPackageConfigurations: [HttpPackageConfiguration]? = nil, lastModifiedTime: Date? = nil, sourceLocationName: String? = nil, tags: [String: String]? = nil, vodSourceName: String? = nil) { + self.adBreakOpportunities = adBreakOpportunities self.arn = arn self.creationTime = creationTime self.httpPackageConfigurations = httpPackageConfigurations @@ -1444,6 +1460,7 @@ extension MediaTailor { } private enum CodingKeys: String, CodingKey { + case adBreakOpportunities = "AdBreakOpportunities" case arn = "Arn" case creationTime = "CreationTime" case httpPackageConfigurations = "HttpPackageConfigurations" diff --git a/Sources/Soto/Services/Mgn/Mgn_api+async.swift b/Sources/Soto/Services/Mgn/Mgn_api+async.swift index cf87aac1f0..dc8dca71ca 100644 --- a/Sources/Soto/Services/Mgn/Mgn_api+async.swift +++ b/Sources/Soto/Services/Mgn/Mgn_api+async.swift @@ -51,6 +51,11 @@ extension Mgn { return try await self.client.execute(operation: "CreateApplication", path: "/CreateApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Create Connector. + public func createConnector(_ input: CreateConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> Connector { + return try await self.client.execute(operation: "CreateConnector", path: "/CreateConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Creates a new Launch Configuration Template. public func createLaunchConfigurationTemplate(_ input: CreateLaunchConfigurationTemplateRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> LaunchConfigurationTemplate { return try await self.client.execute(operation: "CreateLaunchConfigurationTemplate", path: "/CreateLaunchConfigurationTemplate", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -71,6 +76,11 @@ extension Mgn { return try await self.client.execute(operation: "DeleteApplication", path: "/DeleteApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Delete Connector. + public func deleteConnector(_ input: DeleteConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { + return try await self.client.execute(operation: "DeleteConnector", path: "/DeleteConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Deletes a single Job by ID. public func deleteJob(_ input: DeleteJobRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> DeleteJobResponse { return try await self.client.execute(operation: "DeleteJob", path: "/DeleteJob", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -171,6 +181,11 @@ extension Mgn { return try await self.client.execute(operation: "ListApplications", path: "/ListApplications", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// List Connectors. + public func listConnectors(_ input: ListConnectorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListConnectorsResponse { + return try await self.client.execute(operation: "ListConnectors", path: "/ListConnectors", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// List export errors. public func listExportErrors(_ input: ListExportErrorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListExportErrorsResponse { return try await self.client.execute(operation: "ListExportErrors", path: "/ListExportErrors", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -316,6 +331,11 @@ extension Mgn { return try await self.client.execute(operation: "UpdateApplication", path: "/UpdateApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Update Connector. + public func updateConnector(_ input: UpdateConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> Connector { + return try await self.client.execute(operation: "UpdateConnector", path: "/UpdateConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Updates multiple LaunchConfigurations by Source Server ID. public func updateLaunchConfiguration(_ input: UpdateLaunchConfigurationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> LaunchConfiguration { return try await self.client.execute(operation: "UpdateLaunchConfiguration", path: "/UpdateLaunchConfiguration", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -336,6 +356,11 @@ extension Mgn { return try await self.client.execute(operation: "UpdateReplicationConfigurationTemplate", path: "/UpdateReplicationConfigurationTemplate", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Update Source Server. + public func updateSourceServer(_ input: UpdateSourceServerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> SourceServer { + return try await self.client.execute(operation: "UpdateSourceServer", path: "/UpdateSourceServer", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type. public func updateSourceServerReplicationType(_ input: UpdateSourceServerReplicationTypeRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> SourceServer { return try await self.client.execute(operation: "UpdateSourceServerReplicationType", path: "/UpdateSourceServerReplicationType", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -505,6 +530,28 @@ extension Mgn { ) } + /// List Connectors. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + public func listConnectorsPaginator( + _ input: ListConnectorsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listConnectors, + inputKey: \ListConnectorsRequest.nextToken, + outputKey: \ListConnectorsResponse.nextToken, + logger: logger, + on: eventLoop + ) + } + /// List export errors. /// Return PaginatorSequence for operation. /// diff --git a/Sources/Soto/Services/Mgn/Mgn_api.swift b/Sources/Soto/Services/Mgn/Mgn_api.swift index 082215392b..bb05e2de26 100644 --- a/Sources/Soto/Services/Mgn/Mgn_api.swift +++ b/Sources/Soto/Services/Mgn/Mgn_api.swift @@ -103,6 +103,11 @@ public struct Mgn: AWSService { return self.client.execute(operation: "CreateApplication", path: "/CreateApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Create Connector. + public func createConnector(_ input: CreateConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateConnector", path: "/CreateConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Creates a new Launch Configuration Template. public func createLaunchConfigurationTemplate(_ input: CreateLaunchConfigurationTemplateRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateLaunchConfigurationTemplate", path: "/CreateLaunchConfigurationTemplate", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -123,6 +128,11 @@ public struct Mgn: AWSService { return self.client.execute(operation: "DeleteApplication", path: "/DeleteApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Delete Connector. + @discardableResult public func deleteConnector(_ input: DeleteConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteConnector", path: "/DeleteConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Deletes a single Job by ID. public func deleteJob(_ input: DeleteJobRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteJob", path: "/DeleteJob", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -223,6 +233,11 @@ public struct Mgn: AWSService { return self.client.execute(operation: "ListApplications", path: "/ListApplications", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// List Connectors. 
+ public func listConnectors(_ input: ListConnectorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListConnectors", path: "/ListConnectors", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// List export errors. public func listExportErrors(_ input: ListExportErrorsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListExportErrors", path: "/ListExportErrors", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -368,6 +383,11 @@ public struct Mgn: AWSService { return self.client.execute(operation: "UpdateApplication", path: "/UpdateApplication", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Update Connector. + public func updateConnector(_ input: UpdateConnectorRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateConnector", path: "/UpdateConnector", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Updates multiple LaunchConfigurations by Source Server ID. public func updateLaunchConfiguration(_ input: UpdateLaunchConfigurationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "UpdateLaunchConfiguration", path: "/UpdateLaunchConfiguration", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -388,6 +408,11 @@ public struct Mgn: AWSService { return self.client.execute(operation: "UpdateReplicationConfigurationTemplate", path: "/UpdateReplicationConfigurationTemplate", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Update Source Server. + public func updateSourceServer(_ input: UpdateSourceServerRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateSourceServer", path: "/UpdateSourceServer", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type. public func updateSourceServerReplicationType(_ input: UpdateSourceServerReplicationTypeRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "UpdateSourceServerReplicationType", path: "/UpdateSourceServerReplicationType", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -782,6 +807,59 @@ extension Mgn { ) } + /// List Connectors. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. 
It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listConnectorsPaginator( + _ input: ListConnectorsRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListConnectorsResponse, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listConnectors, + inputKey: \ListConnectorsRequest.nextToken, + outputKey: \ListConnectorsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listConnectorsPaginator( + _ input: ListConnectorsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListConnectorsResponse, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listConnectors, + inputKey: \ListConnectorsRequest.nextToken, + outputKey: \ListConnectorsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + /// List export errors. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. @@ -1280,6 +1358,16 @@ extension Mgn.ListApplicationsRequest: AWSPaginateToken { } } +extension Mgn.ListConnectorsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Mgn.ListConnectorsRequest { + return .init( + filters: self.filters, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Mgn.ListExportErrorsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Mgn.ListExportErrorsRequest { return .init( diff --git a/Sources/Soto/Services/Mgn/Mgn_shapes.swift b/Sources/Soto/Services/Mgn/Mgn_shapes.swift index c7c5fc42bb..258c6cd40f 100644 --- a/Sources/Soto/Services/Mgn/Mgn_shapes.swift +++ b/Sources/Soto/Services/Mgn/Mgn_shapes.swift @@ -590,6 +590,71 @@ extension Mgn { } } + public struct Connector: AWSDecodableShape { + /// Connector arn. + public let arn: String? + /// Connector ID. + public let connectorID: String? + /// Connector name. + public let name: String? + /// Connector SSM command config. + public let ssmCommandConfig: ConnectorSsmCommandConfig? + /// Connector SSM instance ID. + public let ssmInstanceID: String? + /// Connector tags. + public let tags: [String: String]? + + public init(arn: String? = nil, connectorID: String? = nil, name: String? = nil, ssmCommandConfig: ConnectorSsmCommandConfig? = nil, ssmInstanceID: String? = nil, tags: [String: String]? 
= nil) { + self.arn = arn + self.connectorID = connectorID + self.name = name + self.ssmCommandConfig = ssmCommandConfig + self.ssmInstanceID = ssmInstanceID + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case connectorID = "connectorID" + case name = "name" + case ssmCommandConfig = "ssmCommandConfig" + case ssmInstanceID = "ssmInstanceID" + case tags = "tags" + } + } + + public struct ConnectorSsmCommandConfig: AWSEncodableShape & AWSDecodableShape { + /// Connector SSM command config CloudWatch log group name. + public let cloudWatchLogGroupName: String? + /// Connector SSM command config CloudWatch output enabled. + public let cloudWatchOutputEnabled: Bool + /// Connector SSM command config output S3 bucket name. + public let outputS3BucketName: String? + /// Connector SSM command config S3 output enabled. + public let s3OutputEnabled: Bool + + public init(cloudWatchLogGroupName: String? = nil, cloudWatchOutputEnabled: Bool, outputS3BucketName: String? = nil, s3OutputEnabled: Bool) { + self.cloudWatchLogGroupName = cloudWatchLogGroupName + self.cloudWatchOutputEnabled = cloudWatchOutputEnabled + self.outputS3BucketName = outputS3BucketName + self.s3OutputEnabled = s3OutputEnabled + } + + public func validate(name: String) throws { + try self.validate(self.cloudWatchLogGroupName, name: "cloudWatchLogGroupName", parent: name, max: 512) + try self.validate(self.cloudWatchLogGroupName, name: "cloudWatchLogGroupName", parent: name, min: 1) + try self.validate(self.cloudWatchLogGroupName, name: "cloudWatchLogGroupName", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") + try self.validate(self.outputS3BucketName, name: "outputS3BucketName", parent: name, pattern: "^[a-zA-Z0-9.\\-_]{1,255}$") + } + + private enum CodingKeys: String, CodingKey { + case cloudWatchLogGroupName = "cloudWatchLogGroupName" + case cloudWatchOutputEnabled = "cloudWatchOutputEnabled" + case outputS3BucketName = "outputS3BucketName" + case s3OutputEnabled = "s3OutputEnabled" + } + } + public struct CreateApplicationRequest: AWSEncodableShape { /// Account ID. public let accountID: String? @@ -631,6 +696,46 @@ extension Mgn { } } + public struct CreateConnectorRequest: AWSEncodableShape { + /// Create Connector request name. + public let name: String + /// Create Connector request SSM command config. + public let ssmCommandConfig: ConnectorSsmCommandConfig? + /// Create Connector request SSM instance ID. + public let ssmInstanceID: String + /// Create Connector request tags. + public let tags: [String: String]? + + public init(name: String, ssmCommandConfig: ConnectorSsmCommandConfig? = nil, ssmInstanceID: String, tags: [String: String]? 
= nil) { + self.name = name + self.ssmCommandConfig = ssmCommandConfig + self.ssmInstanceID = ssmInstanceID + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9_-]+$") + try self.ssmCommandConfig?.validate(name: "\(name).ssmCommandConfig") + try self.validate(self.ssmInstanceID, name: "ssmInstanceID", parent: name, max: 20) + try self.validate(self.ssmInstanceID, name: "ssmInstanceID", parent: name, min: 19) + try self.validate(self.ssmInstanceID, name: "ssmInstanceID", parent: name, pattern: "(^i-[0-9a-zA-Z]{17}$)|(^mi-[0-9a-zA-Z]{17}$)") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case ssmCommandConfig = "ssmCommandConfig" + case ssmInstanceID = "ssmInstanceID" + case tags = "tags" + } + } + public struct CreateLaunchConfigurationTemplateRequest: AWSEncodableShape { /// Associate public Ip address. public let associatePublicIpAddress: Bool? @@ -990,6 +1095,25 @@ extension Mgn { public init() {} } + public struct DeleteConnectorRequest: AWSEncodableShape { + /// Delete Connector request connector ID. + public let connectorID: String + + public init(connectorID: String) { + self.connectorID = connectorID + } + + public func validate(name: String) throws { + try self.validate(self.connectorID, name: "connectorID", parent: name, max: 27) + try self.validate(self.connectorID, name: "connectorID", parent: name, min: 27) + try self.validate(self.connectorID, name: "connectorID", parent: name, pattern: "^connector-[0-9a-zA-Z]{17}$") + } + + private enum CodingKeys: String, CodingKey { + case connectorID = "connectorID" + } + } + public struct DeleteJobRequest: AWSEncodableShape { /// Request to delete Job from service by Account ID. public let accountID: String? @@ -2596,6 +2720,73 @@ extension Mgn { } } + public struct ListConnectorsRequest: AWSEncodableShape { + /// List Connectors Request filters. + public let filters: ListConnectorsRequestFilters? + /// List Connectors Request max results. + public let maxResults: Int? + /// List Connectors Request next token. + public let nextToken: String? + + public init(filters: ListConnectorsRequestFilters? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.filters?.validate(name: "\(name).filters") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListConnectorsRequestFilters: AWSEncodableShape { + /// List Connectors Request Filters connector IDs. + public let connectorIDs: [String]? + + public init(connectorIDs: [String]? 
= nil) { + self.connectorIDs = connectorIDs + } + + public func validate(name: String) throws { + try self.connectorIDs?.forEach { + try validate($0, name: "connectorIDs[]", parent: name, max: 27) + try validate($0, name: "connectorIDs[]", parent: name, min: 27) + try validate($0, name: "connectorIDs[]", parent: name, pattern: "^connector-[0-9a-zA-Z]{17}$") + } + try self.validate(self.connectorIDs, name: "connectorIDs", parent: name, max: 20) + } + + private enum CodingKeys: String, CodingKey { + case connectorIDs = "connectorIDs" + } + } + + public struct ListConnectorsResponse: AWSDecodableShape { + /// List connectors response items. + public let items: [Connector]? + /// List connectors response next token. + public let nextToken: String? + + public init(items: [Connector]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + public struct ListExportErrorsRequest: AWSEncodableShape { /// List export errors request export id. public let exportID: String @@ -3841,6 +4032,8 @@ extension Mgn { public let applicationID: String? /// Source server ARN. public let arn: String? + /// Source Server connector action. + public let connectorAction: SourceServerConnectorAction? /// Source server data replication info. public let dataReplicationInfo: DataReplicationInfo? /// Source server fqdn for action framework. @@ -3864,9 +4057,10 @@ extension Mgn { /// Source server vCenter client id. public let vcenterClientID: String? - public init(applicationID: String? = nil, arn: String? = nil, dataReplicationInfo: DataReplicationInfo? = nil, fqdnForActionFramework: String? = nil, isArchived: Bool? = nil, launchedInstance: LaunchedInstance? = nil, lifeCycle: LifeCycle? = nil, replicationType: ReplicationType? = nil, sourceProperties: SourceProperties? = nil, sourceServerID: String? = nil, tags: [String: String]? = nil, userProvidedID: String? = nil, vcenterClientID: String? = nil) { + public init(applicationID: String? = nil, arn: String? = nil, connectorAction: SourceServerConnectorAction? = nil, dataReplicationInfo: DataReplicationInfo? = nil, fqdnForActionFramework: String? = nil, isArchived: Bool? = nil, launchedInstance: LaunchedInstance? = nil, lifeCycle: LifeCycle? = nil, replicationType: ReplicationType? = nil, sourceProperties: SourceProperties? = nil, sourceServerID: String? = nil, tags: [String: String]? = nil, userProvidedID: String? = nil, vcenterClientID: String? = nil) { self.applicationID = applicationID self.arn = arn + self.connectorAction = connectorAction self.dataReplicationInfo = dataReplicationInfo self.fqdnForActionFramework = fqdnForActionFramework self.isArchived = isArchived @@ -3883,6 +4077,7 @@ extension Mgn { private enum CodingKeys: String, CodingKey { case applicationID = "applicationID" case arn = "arn" + case connectorAction = "connectorAction" case dataReplicationInfo = "dataReplicationInfo" case fqdnForActionFramework = "fqdnForActionFramework" case isArchived = "isArchived" @@ -3976,6 +4171,32 @@ extension Mgn { } } + public struct SourceServerConnectorAction: AWSEncodableShape & AWSDecodableShape { + /// Source Server connector action connector arn. + public let connectorArn: String? + /// Source Server connector action credentials secret arn. + public let credentialsSecretArn: String? + + public init(connectorArn: String? = nil, credentialsSecretArn: String? 
= nil) { + self.connectorArn = connectorArn + self.credentialsSecretArn = credentialsSecretArn + } + + public func validate(name: String) throws { + try self.validate(self.connectorArn, name: "connectorArn", parent: name, max: 100) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, min: 27) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, pattern: "^arn:[\\w-]+:mgn:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:connector\\/(connector-[0-9a-zA-Z]{17})$") + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, max: 100) + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, min: 20) + try self.validate(self.credentialsSecretArn, name: "credentialsSecretArn", parent: name, pattern: "^arn:[\\w-]+:secretsmanager:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:secret:(.+)$") + } + + private enum CodingKeys: String, CodingKey { + case connectorArn = "connectorArn" + case credentialsSecretArn = "credentialsSecretArn" + } + } + public struct SsmDocument: AWSEncodableShape & AWSDecodableShape { /// User-friendly name for the AWS Systems Manager Document. public let actionName: String @@ -4570,6 +4791,37 @@ extension Mgn { } } + public struct UpdateConnectorRequest: AWSEncodableShape { + /// Update Connector request connector ID. + public let connectorID: String + /// Update Connector request name. + public let name: String? + /// Update Connector request SSM command config. + public let ssmCommandConfig: ConnectorSsmCommandConfig? + + public init(connectorID: String, name: String? = nil, ssmCommandConfig: ConnectorSsmCommandConfig? = nil) { + self.connectorID = connectorID + self.name = name + self.ssmCommandConfig = ssmCommandConfig + } + + public func validate(name: String) throws { + try self.validate(self.connectorID, name: "connectorID", parent: name, max: 27) + try self.validate(self.connectorID, name: "connectorID", parent: name, min: 27) + try self.validate(self.connectorID, name: "connectorID", parent: name, pattern: "^connector-[0-9a-zA-Z]{17}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9_-]+$") + try self.ssmCommandConfig?.validate(name: "\(name).ssmCommandConfig") + } + + private enum CodingKeys: String, CodingKey { + case connectorID = "connectorID" + case name = "name" + case ssmCommandConfig = "ssmCommandConfig" + } + } + public struct UpdateLaunchConfigurationRequest: AWSEncodableShape { /// Update Launch configuration Account ID. public let accountID: String? @@ -4945,6 +5197,37 @@ extension Mgn { } } + public struct UpdateSourceServerRequest: AWSEncodableShape { + /// Update Source Server request account ID. + public let accountID: String? + /// Update Source Server request connector action. + public let connectorAction: SourceServerConnectorAction? + /// Update Source Server request source server ID. + public let sourceServerID: String + + public init(accountID: String? = nil, connectorAction: SourceServerConnectorAction? 
= nil, sourceServerID: String) { + self.accountID = accountID + self.connectorAction = connectorAction + self.sourceServerID = sourceServerID + } + + public func validate(name: String) throws { + try self.validate(self.accountID, name: "accountID", parent: name, max: 12) + try self.validate(self.accountID, name: "accountID", parent: name, min: 12) + try self.validate(self.accountID, name: "accountID", parent: name, pattern: "[0-9]{12,}") + try self.connectorAction?.validate(name: "\(name).connectorAction") + try self.validate(self.sourceServerID, name: "sourceServerID", parent: name, max: 19) + try self.validate(self.sourceServerID, name: "sourceServerID", parent: name, min: 19) + try self.validate(self.sourceServerID, name: "sourceServerID", parent: name, pattern: "^s-[0-9a-zA-Z]{17}$") + } + + private enum CodingKeys: String, CodingKey { + case accountID = "accountID" + case connectorAction = "connectorAction" + case sourceServerID = "sourceServerID" + } + } + public struct UpdateWaveRequest: AWSEncodableShape { /// Account ID. public let accountID: String? diff --git a/Sources/Soto/Services/Omics/Omics_shapes.swift b/Sources/Soto/Services/Omics/Omics_shapes.swift index 3566d6b7f5..759bdaf3cb 100644 --- a/Sources/Soto/Services/Omics/Omics_shapes.swift +++ b/Sources/Soto/Services/Omics/Omics_shapes.swift @@ -55,6 +55,13 @@ extension Omics { public var description: String { return self.rawValue } } + public enum ETagAlgorithm: String, CustomStringConvertible, Codable, Sendable { + case bamMd5Up = "BAM_MD5up" + case cramMd5Up = "CRAM_MD5up" + case fastqMd5Up = "FASTQ_MD5up" + public var description: String { return self.rawValue } + } + public enum EncryptionType: String, CustomStringConvertible, Codable, Sendable { /// KMS case kms = "KMS" @@ -1993,6 +2000,27 @@ extension Omics { private enum CodingKeys: CodingKey {} } + public struct ETag: AWSDecodableShape { + /// The algorithm used to calculate the read set’s ETag(s). + public let algorithm: ETagAlgorithm? + /// The ETag hash calculated on Source1 of the read set. + public let source1: String? + /// The ETag hash calculated on Source2 of the read set. + public let source2: String? + + public init(algorithm: ETagAlgorithm? = nil, source1: String? = nil, source2: String? = nil) { + self.algorithm = algorithm + self.source1 = source1 + self.source2 = source2 + } + + private enum CodingKeys: String, CodingKey { + case algorithm = "algorithm" + case source1 = "source1" + case source2 = "source2" + } + } + public struct ExportReadSet: AWSEncodableShape { /// The set's ID. public let readSetId: String @@ -2620,6 +2648,8 @@ extension Omics { public let creationType: CreationType? /// The read set's description. public let description: String? + /// The entity tag (ETag) is a hash of the object meant to represent its semantic content. + public let etag: ETag? /// The read set's files. public let files: ReadSetFiles? /// The read set's file type. @@ -2643,11 +2673,12 @@ extension Omics { /// The read set's subject ID. public let subjectId: String? - public init(arn: String, creationTime: Date, creationType: CreationType? = nil, description: String? = nil, files: ReadSetFiles? = nil, fileType: FileType, id: String, name: String? = nil, referenceArn: String? = nil, sampleId: String? = nil, sequenceInformation: SequenceInformation? = nil, sequenceStoreId: String, status: ReadSetStatus, statusMessage: String? = nil, subjectId: String? = nil) { + public init(arn: String, creationTime: Date, creationType: CreationType? = nil, description: String? 
= nil, etag: ETag? = nil, files: ReadSetFiles? = nil, fileType: FileType, id: String, name: String? = nil, referenceArn: String? = nil, sampleId: String? = nil, sequenceInformation: SequenceInformation? = nil, sequenceStoreId: String, status: ReadSetStatus, statusMessage: String? = nil, subjectId: String? = nil) { self.arn = arn self.creationTime = creationTime self.creationType = creationType self.description = description + self.etag = etag self.files = files self.fileType = fileType self.id = id @@ -2666,6 +2697,7 @@ extension Omics { case creationTime = "creationTime" case creationType = "creationType" case description = "description" + case etag = "etag" case files = "files" case fileType = "fileType" case id = "id" @@ -5208,6 +5240,8 @@ extension Omics { public let creationType: CreationType? /// The read set's description. public let description: String? + /// The entity tag (ETag) is a hash of the object representing its semantic content. + public let etag: ETag? /// The read set's file type. public let fileType: FileType /// The read set's ID. @@ -5228,11 +5262,12 @@ extension Omics { /// The read set's subject ID. public let subjectId: String? - public init(arn: String, creationTime: Date, creationType: CreationType? = nil, description: String? = nil, fileType: FileType, id: String, name: String? = nil, referenceArn: String? = nil, sampleId: String? = nil, sequenceInformation: SequenceInformation? = nil, sequenceStoreId: String, status: ReadSetStatus, statusMessage: String? = nil, subjectId: String? = nil) { + public init(arn: String, creationTime: Date, creationType: CreationType? = nil, description: String? = nil, etag: ETag? = nil, fileType: FileType, id: String, name: String? = nil, referenceArn: String? = nil, sampleId: String? = nil, sequenceInformation: SequenceInformation? = nil, sequenceStoreId: String, status: ReadSetStatus, statusMessage: String? = nil, subjectId: String? = nil) { self.arn = arn self.creationTime = creationTime self.creationType = creationType self.description = description + self.etag = etag self.fileType = fileType self.id = id self.name = name @@ -5250,6 +5285,7 @@ extension Omics { case creationTime = "creationTime" case creationType = "creationType" case description = "description" + case etag = "etag" case fileType = "fileType" case id = "id" case name = "name" diff --git a/Sources/Soto/Services/Pricing/Pricing_api+async.swift b/Sources/Soto/Services/Pricing/Pricing_api+async.swift index 97f98ab545..2f7a2c046d 100644 --- a/Sources/Soto/Services/Pricing/Pricing_api+async.swift +++ b/Sources/Soto/Services/Pricing/Pricing_api+async.swift @@ -31,7 +31,7 @@ extension Pricing { return try await self.client.execute(operation: "GetAttributeValues", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns the URL that you can retrieve your Price List file from. 
This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response. public func getPriceListFileUrl(_ input: GetPriceListFileUrlRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetPriceListFileUrlResponse { return try await self.client.execute(operation: "GetPriceListFileUrl", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -41,7 +41,7 @@ extension Pricing { return try await self.client.execute(operation: "GetProducts", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. public func listPriceLists(_ input: ListPriceListsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListPriceListsResponse { return try await self.client.execute(operation: "ListPriceLists", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -117,7 +117,7 @@ extension Pricing { ) } - /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). 
This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/Pricing/Pricing_api.swift b/Sources/Soto/Services/Pricing/Pricing_api.swift index 274bce177f..3fd8ecf285 100644 --- a/Sources/Soto/Services/Pricing/Pricing_api.swift +++ b/Sources/Soto/Services/Pricing/Pricing_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS Pricing service. /// -/// The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following: Build cost control and scenario planning tools Reconcile billing data Forecast future spend for budgeting purposes Provide cost benefit analysis that compare your internal workloads with Amazon Web Services Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType. You can use the following endpoints for the Amazon Web Services Price List API: https://api.pricing.us-east-1.amazonaws.com https://api.pricing.ap-south-1.amazonaws.com +/// The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following: Build cost control and scenario planning tools Reconcile billing data Forecast future spend for budgeting purposes Provide cost benefit analysis that compare your internal workloads with Amazon Web Services Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType. For more information, see Using the Amazon Web Services Price List API in the Billing User Guide. 
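A minimal sketch of the ListPriceLists → GetPriceListFileUrl flow described above, built on the request shapes that appear later in this diff. The priceLists, priceListArn, fileFormats, and url response members are assumed from the Price List API model rather than shown in this change.

```swift
import Foundation
import SotoPricing

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }

// The Price List API is only served from the us-east-1 and ap-south-1 endpoints.
let pricing = Pricing(client: client, region: .useast1)

// 1. Find the Price List references for Amazon EC2 in USD, effective today.
let lists = try await pricing.listPriceLists(.init(
    currencyCode: "USD",
    effectiveDate: Date(),
    serviceCode: "AmazonEC2"
))

// 2. Exchange a PriceListArn + FileFormat for a download URL.
//    (priceLists / priceListArn / url are assumed response members.)
if let arn = lists.priceLists?.first?.priceListArn {
    let response = try await pricing.getPriceListFileUrl(.init(fileFormat: "json", priceListArn: arn))
    print(response.url ?? "no URL returned")
}
```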
public struct Pricing: AWSService { // MARK: Member variables @@ -75,7 +75,7 @@ public struct Pricing: AWSService { return self.client.execute(operation: "GetAttributeValues", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response. public func getPriceListFileUrl(_ input: GetPriceListFileUrlRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "GetPriceListFileUrl", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -85,7 +85,7 @@ public struct Pricing: AWSService { return self.client.execute(operation: "GetProducts", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. public func listPriceLists(_ input: ListPriceListsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListPriceLists", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -262,7 +262,7 @@ extension Pricing { ) } - /// This feature is in preview release and is subject to change. 
Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. + /// This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10). This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. diff --git a/Sources/Soto/Services/Pricing/Pricing_shapes.swift b/Sources/Soto/Services/Pricing/Pricing_shapes.swift index 13ea6c21e1..783934e7da 100644 --- a/Sources/Soto/Services/Pricing/Pricing_shapes.swift +++ b/Sources/Soto/Services/Pricing/Pricing_shapes.swift @@ -166,9 +166,9 @@ extension Pricing { } public struct GetPriceListFileUrlRequest: AWSEncodableShape { - /// The format that you want to retrieve your Price List files in. The FileFormat can be obtained from the ListPriceLists response. + /// The format that you want to retrieve your Price List files in. The FileFormat can be obtained from the ListPriceLists response. public let fileFormat: String - /// The unique identifier that maps to where your Price List files are located. PriceListArn can be obtained from the ListPriceLists response. + /// The unique identifier that maps to where your Price List files are located. PriceListArn can be obtained from the ListPriceLists response. public let priceListArn: String public init(fileFormat: String, priceListArn: String) { @@ -267,9 +267,9 @@ extension Pricing { public let maxResults: Int? /// The pagination token that indicates the next set of results that you want to retrieve. public let nextToken: String? - /// This is used to filter the Price List by Amazon Web Services Region. For example, to get the price list only for the US East (N. Virginia) Region, use us-east-1. If nothing is specified, you retrieve price lists for all applicable Regions. The available RegionCode list can be retrieved from GetAttributeValues API. + /// This is used to filter the Price List by Amazon Web Services Region. For example, to get the price list only for the US East (N. Virginia) Region, use us-east-1. If nothing is specified, you retrieve price lists for all applicable Regions. The available RegionCode list can be retrieved from GetAttributeValues API. public let regionCode: String? - /// The service code or the Savings Plan service code for the attributes that you want to retrieve. 
For example, to get the list of applicable Amazon EC2 price lists, use AmazonEC2. For a full list of service codes containing On-Demand and Reserved Instance (RI) pricing, use the DescribeServices API. To retrieve the Compute Savings Plan price lists, use ComputeSavingsPlans. To retrieve Machine Learning Savings Plans price lists, use MachineLearningSavingsPlans. + /// The service code or the Savings Plan service code for the attributes that you want to retrieve. For example, to get the list of applicable Amazon EC2 price lists, use AmazonEC2. For a full list of service codes containing On-Demand and Reserved Instance (RI) pricing, use the DescribeServices API. To retrieve the Reserved Instance and Compute Savings Plan price lists, use ComputeSavingsPlans. To retrieve Machine Learning Savings Plans price lists, use MachineLearningSavingsPlans. public let serviceCode: String public init(currencyCode: String, effectiveDate: Date, maxResults: Int? = nil, nextToken: String? = nil, regionCode: String? = nil, serviceCode: String) { diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 1b7f040900..b3bfbdfcea 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -889,6 +889,7 @@ extension QuickSight { case analysis = "ANALYSIS" case dashboard = "DASHBOARD" case dataset = "DATASET" + case datasource = "DATASOURCE" case topic = "TOPIC" public var description: String { return self.rawValue } } @@ -1349,6 +1350,7 @@ extension QuickSight { } public enum TableTotalsPlacement: String, CustomStringConvertible, Codable, Sendable { + case auto = "AUTO" case end = "END" case start = "START" public var description: String { return self.rawValue } @@ -1514,6 +1516,12 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum ValidationStrategyMode: String, CustomStringConvertible, Codable, Sendable { + case lenient = "LENIENT" + case strict = "STRICT" + public var description: String { return self.rawValue } + } + public enum ValueWhenUnsetOption: String, CustomStringConvertible, Codable, Sendable { case null = "NULL" case recommendedValue = "RECOMMENDED_VALUE" @@ -5851,8 +5859,10 @@ extension QuickSight { public let tags: [Tag]? /// The ARN for the theme to apply to the analysis that you're creating. To see the theme in the Amazon QuickSight console, make sure that you have access to it. public let themeArn: String? + /// The option to relax the validation needed to create an analysis with definition objects. This skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? - public init(analysisId: String, awsAccountId: String, definition: AnalysisDefinition? = nil, name: String, parameters: Parameters? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: AnalysisSourceEntity? = nil, tags: [Tag]? = nil, themeArn: String? = nil) { + public init(analysisId: String, awsAccountId: String, definition: AnalysisDefinition? = nil, name: String, parameters: Parameters? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: AnalysisSourceEntity? = nil, tags: [Tag]? = nil, themeArn: String? = nil, validationStrategy: ValidationStrategy? 
= nil) { self.analysisId = analysisId self.awsAccountId = awsAccountId self.definition = definition @@ -5862,6 +5872,7 @@ extension QuickSight { self.sourceEntity = sourceEntity self.tags = tags self.themeArn = themeArn + self.validationStrategy = validationStrategy } public func validate(name: String) throws { @@ -5896,6 +5907,7 @@ extension QuickSight { case sourceEntity = "SourceEntity" case tags = "Tags" case themeArn = "ThemeArn" + case validationStrategy = "ValidationStrategy" } } @@ -5979,10 +5991,12 @@ extension QuickSight { public let tags: [Tag]? /// The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that is used in the source entity. The theme ARN must exist in the same Amazon Web Services account where you create the dashboard. public let themeArn: String? + /// The option to relax the validation needed to create a dashboard with definition objects. This option skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? /// A description for the first version of the dashboard being created. public let versionDescription: String? - public init(awsAccountId: String, dashboardId: String, dashboardPublishOptions: DashboardPublishOptions? = nil, definition: DashboardVersionDefinition? = nil, name: String, parameters: Parameters? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: DashboardSourceEntity? = nil, tags: [Tag]? = nil, themeArn: String? = nil, versionDescription: String? = nil) { + public init(awsAccountId: String, dashboardId: String, dashboardPublishOptions: DashboardPublishOptions? = nil, definition: DashboardVersionDefinition? = nil, name: String, parameters: Parameters? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: DashboardSourceEntity? = nil, tags: [Tag]? = nil, themeArn: String? = nil, validationStrategy: ValidationStrategy? = nil, versionDescription: String? = nil) { self.awsAccountId = awsAccountId self.dashboardId = dashboardId self.dashboardPublishOptions = dashboardPublishOptions @@ -5993,6 +6007,7 @@ extension QuickSight { self.parameters = parameters self.sourceEntity = sourceEntity self.tags = tags self.themeArn = themeArn + self.validationStrategy = validationStrategy self.versionDescription = versionDescription } @@ -6031,6 +6046,7 @@ extension QuickSight { case sourceEntity = "SourceEntity" case tags = "Tags" case themeArn = "ThemeArn" + case validationStrategy = "ValidationStrategy" case versionDescription = "VersionDescription" } } @@ -7044,13 +7060,15 @@ extension QuickSight { /// An ID for the template that you want to create. This template is unique per Amazon Web Services Region; in /// each Amazon Web Services account. public let templateId: String + /// The option to relax the validation needed to create a template with definition objects. This skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? /// A description of the current template version being created. This API operation creates the /// first version of the template. Every time UpdateTemplate is called, a new /// version is created. Each version of the template maintains a description of the version /// in the VersionDescription field. public let versionDescription: String? - public init(awsAccountId: String, definition: TemplateVersionDefinition? = nil, name: String? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: TemplateSourceEntity? = nil, tags: [Tag]?
= nil, templateId: String, versionDescription: String? = nil) { + public init(awsAccountId: String, definition: TemplateVersionDefinition? = nil, name: String? = nil, permissions: [ResourcePermission]? = nil, sourceEntity: TemplateSourceEntity? = nil, tags: [Tag]? = nil, templateId: String, validationStrategy: ValidationStrategy? = nil, versionDescription: String? = nil) { self.awsAccountId = awsAccountId self.definition = definition self.name = name @@ -7058,6 +7076,7 @@ extension QuickSight { self.sourceEntity = sourceEntity self.tags = tags self.templateId = templateId + self.validationStrategy = validationStrategy self.versionDescription = versionDescription } @@ -7092,6 +7111,7 @@ extension QuickSight { case permissions = "Permissions" case sourceEntity = "SourceEntity" case tags = "Tags" + case validationStrategy = "ValidationStrategy" case versionDescription = "VersionDescription" } } @@ -14971,12 +14991,15 @@ extension QuickSight { public let categoryValues: [String]? /// The match operator that is used to determine if a filter should be applied. public let matchOperator: CategoryFilterMatchOperator + /// This option determines how null values should be treated when filtering data. ALL_VALUES: Include null values in filtered results. NULLS_ONLY: Only include null values in filtered results. NON_NULLS_ONLY: Exclude null values from filtered results. + public let nullOption: FilterNullOption? /// Select all of the values. Null is not the assigned value of select all. FILTER_ALL_VALUES public let selectAllOptions: CategoryFilterSelectAllOptions? - public init(categoryValues: [String]? = nil, matchOperator: CategoryFilterMatchOperator, selectAllOptions: CategoryFilterSelectAllOptions? = nil) { + public init(categoryValues: [String]? = nil, matchOperator: CategoryFilterMatchOperator, nullOption: FilterNullOption? = nil, selectAllOptions: CategoryFilterSelectAllOptions? = nil) { self.categoryValues = categoryValues self.matchOperator = matchOperator + self.nullOption = nullOption self.selectAllOptions = selectAllOptions } @@ -14990,6 +15013,7 @@ extension QuickSight { private enum CodingKeys: String, CodingKey { case categoryValues = "CategoryValues" case matchOperator = "MatchOperator" + case nullOption = "NullOption" case selectAllOptions = "SelectAllOptions" } } @@ -23839,6 +23863,44 @@ extension QuickSight { } } + public struct RedshiftIAMParameters: AWSEncodableShape & AWSDecodableShape { + /// Automatically creates a database user. If your database doesn't have a DatabaseUser, set this parameter to True. If there is no DatabaseUser, Amazon QuickSight can't connect to your cluster. The RoleArn that you use for this operation must grant access to redshift:CreateClusterUser to successfully create the user. + public let autoCreateDatabaseUser: Bool? + /// A list of groups whose permissions will be granted to Amazon QuickSight to access the cluster. These permissions are combined with the permissions granted to Amazon QuickSight by the DatabaseUser. If you choose to include this parameter, the RoleArn must grant access to redshift:JoinGroup. + public let databaseGroups: [String]? + /// The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser to True to create a new user with PUBLIC permissions. 
+ public let databaseUser: String + /// Use the RoleArn structure to allow Amazon QuickSight to call redshift:GetClusterCredentials on your cluster. The calling principal must have iam:PassRole access to pass the role to Amazon QuickSight. The role's trust policy must allow the Amazon QuickSight service principal to assume the role. + public let roleArn: String + + public init(autoCreateDatabaseUser: Bool? = nil, databaseGroups: [String]? = nil, databaseUser: String, roleArn: String) { + self.autoCreateDatabaseUser = autoCreateDatabaseUser + self.databaseGroups = databaseGroups + self.databaseUser = databaseUser + self.roleArn = roleArn + } + + public func validate(name: String) throws { + try self.databaseGroups?.forEach { + try validate($0, name: "databaseGroups[]", parent: name, max: 64) + try validate($0, name: "databaseGroups[]", parent: name, min: 1) + } + try self.validate(self.databaseGroups, name: "databaseGroups", parent: name, max: 50) + try self.validate(self.databaseGroups, name: "databaseGroups", parent: name, min: 1) + try self.validate(self.databaseUser, name: "databaseUser", parent: name, max: 64) + try self.validate(self.databaseUser, name: "databaseUser", parent: name, min: 1) + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + } + + private enum CodingKeys: String, CodingKey { + case autoCreateDatabaseUser = "AutoCreateDatabaseUser" + case databaseGroups = "DatabaseGroups" + case databaseUser = "DatabaseUser" + case roleArn = "RoleArn" + } + } + public struct RedshiftParameters: AWSEncodableShape & AWSDecodableShape { /// Cluster ID. This field can be blank if the Host and Port are provided. public let clusterId: String? @@ -23846,13 +23908,16 @@ extension QuickSight { public let database: String /// Host. This field can be blank if ClusterId is provided. public let host: String? + /// An optional parameter that uses IAM authentication to grant Amazon QuickSight access to your cluster. This parameter can be used instead of DataSourceCredentials. + public let iamParameters: RedshiftIAMParameters? /// Port. This field can be blank if the ClusterId is provided. public let port: Int? - public init(clusterId: String? = nil, database: String, host: String? = nil, port: Int? = nil) { + public init(clusterId: String? = nil, database: String, host: String? = nil, iamParameters: RedshiftIAMParameters? = nil, port: Int? 
= nil) { self.clusterId = clusterId self.database = database self.host = host + self.iamParameters = iamParameters self.port = port } @@ -23863,6 +23928,7 @@ extension QuickSight { try self.validate(self.database, name: "database", parent: name, min: 1) try self.validate(self.host, name: "host", parent: name, max: 256) try self.validate(self.host, name: "host", parent: name, min: 1) + try self.iamParameters?.validate(name: "\(name).iamParameters") try self.validate(self.port, name: "port", parent: name, max: 65535) try self.validate(self.port, name: "port", parent: name, min: 0) } @@ -23871,6 +23937,7 @@ extension QuickSight { case clusterId = "ClusterId" case database = "Database" case host = "Host" + case iamParameters = "IAMParameters" case port = "Port" } } @@ -24448,9 +24515,9 @@ extension QuickSight { } try self.validate(self.inputColumns, name: "inputColumns", parent: name, max: 2048) try self.validate(self.inputColumns, name: "inputColumns", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, max: 256) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.schema, name: "schema", parent: name, max: 64) + try self.validate(self.schema, name: "schema", parent: name, max: 256) } private enum CodingKeys: String, CodingKey { @@ -30653,8 +30720,10 @@ extension QuickSight { public let sourceEntity: AnalysisSourceEntity? /// The Amazon Resource Name (ARN) for the theme to apply to the analysis that you're creating. To see the theme in the Amazon QuickSight console, make sure that you have access to it. public let themeArn: String? + /// The option to relax the validation needed to update an analysis with definition objects. This skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? - public init(analysisId: String, awsAccountId: String, definition: AnalysisDefinition? = nil, name: String, parameters: Parameters? = nil, sourceEntity: AnalysisSourceEntity? = nil, themeArn: String? = nil) { + public init(analysisId: String, awsAccountId: String, definition: AnalysisDefinition? = nil, name: String, parameters: Parameters? = nil, sourceEntity: AnalysisSourceEntity? = nil, themeArn: String? = nil, validationStrategy: ValidationStrategy? = nil) { self.analysisId = analysisId self.awsAccountId = awsAccountId self.definition = definition @@ -30662,6 +30731,7 @@ extension QuickSight { self.parameters = parameters self.sourceEntity = sourceEntity self.themeArn = themeArn + self.validationStrategy = validationStrategy } public func validate(name: String) throws { @@ -30684,6 +30754,7 @@ extension QuickSight { case parameters = "Parameters" case sourceEntity = "SourceEntity" case themeArn = "ThemeArn" + case validationStrategy = "ValidationStrategy" } } @@ -30902,10 +30973,12 @@ extension QuickSight { public let sourceEntity: DashboardSourceEntity? /// The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that was originally associated with the entity. The theme ARN must exist in the same Amazon Web Services account where you create the dashboard. public let themeArn: String? + /// The option to relax the validation needed to update a dashboard with definition objects. This skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? /// A description for the first version of the dashboard being created. 
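A short sketch of how the new Redshift IAM shapes above compose, using only the initializers shown in this diff. The role ARN, cluster, database, and group values are placeholders, and wiring the result into a CreateDataSource/UpdateDataSource call is assumed rather than shown.

```swift
import SotoQuickSight

// IAM-based auth for a QuickSight Redshift data source (used instead of DataSourceCredentials).
let iam = QuickSight.RedshiftIAMParameters(
    autoCreateDatabaseUser: true,            // requires redshift:CreateClusterUser on roleArn
    databaseGroups: ["quicksight_readers"],  // requires redshift:JoinGroup
    databaseUser: "quicksight",
    roleArn: "arn:aws:iam::123456789012:role/quicksight-redshift-access"
)

let redshift = QuickSight.RedshiftParameters(
    clusterId: "my-redshift-cluster",
    database: "analytics",
    iamParameters: iam
)

// `redshift` would then be wrapped in DataSourceParameters for a
// CreateDataSource/UpdateDataSource request (not shown in this diff).
try redshift.validate(name: "RedshiftParameters")
```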
public let versionDescription: String? - public init(awsAccountId: String, dashboardId: String, dashboardPublishOptions: DashboardPublishOptions? = nil, definition: DashboardVersionDefinition? = nil, name: String, parameters: Parameters? = nil, sourceEntity: DashboardSourceEntity? = nil, themeArn: String? = nil, versionDescription: String? = nil) { + public init(awsAccountId: String, dashboardId: String, dashboardPublishOptions: DashboardPublishOptions? = nil, definition: DashboardVersionDefinition? = nil, name: String, parameters: Parameters? = nil, sourceEntity: DashboardSourceEntity? = nil, themeArn: String? = nil, validationStrategy: ValidationStrategy? = nil, versionDescription: String? = nil) { self.awsAccountId = awsAccountId self.dashboardId = dashboardId self.dashboardPublishOptions = dashboardPublishOptions @@ -30914,6 +30987,7 @@ extension QuickSight { self.parameters = parameters self.sourceEntity = sourceEntity self.themeArn = themeArn + self.validationStrategy = validationStrategy self.versionDescription = versionDescription } @@ -30940,6 +31014,7 @@ extension QuickSight { case parameters = "Parameters" case sourceEntity = "SourceEntity" case themeArn = "ThemeArn" + case validationStrategy = "ValidationStrategy" case versionDescription = "VersionDescription" } } @@ -32002,18 +32077,21 @@ extension QuickSight { public let sourceEntity: TemplateSourceEntity? /// The ID for the template. public let templateId: String + /// The option to relax the validation needed to update a template with definition objects. This skips the validation step for specific errors. + public let validationStrategy: ValidationStrategy? /// A description of the current template version that is being updated. Every time you call /// UpdateTemplate, you create a new version of the template. Each version /// of the template maintains a description of the version in the /// VersionDescription field. public let versionDescription: String? - public init(awsAccountId: String, definition: TemplateVersionDefinition? = nil, name: String? = nil, sourceEntity: TemplateSourceEntity? = nil, templateId: String, versionDescription: String? = nil) { + public init(awsAccountId: String, definition: TemplateVersionDefinition? = nil, name: String? = nil, sourceEntity: TemplateSourceEntity? = nil, templateId: String, validationStrategy: ValidationStrategy? = nil, versionDescription: String? = nil) { self.awsAccountId = awsAccountId self.definition = definition self.name = name self.sourceEntity = sourceEntity self.templateId = templateId + self.validationStrategy = validationStrategy self.versionDescription = versionDescription } @@ -32036,6 +32114,7 @@ extension QuickSight { case definition = "Definition" case name = "Name" case sourceEntity = "SourceEntity" + case validationStrategy = "ValidationStrategy" case versionDescription = "VersionDescription" } } @@ -32935,6 +33014,19 @@ extension QuickSight { } } + public struct ValidationStrategy: AWSEncodableShape { + /// The mode of validation for the asset to be created or updated. When you set this value to STRICT, strict validation for every error is enforced. When you set this value to LENIENT, validation is skipped for specific UI errors. + public let mode: ValidationStrategyMode + + public init(mode: ValidationStrategyMode) { + self.mode = mode + } + + private enum CodingKeys: String, CodingKey { + case mode = "Mode" + } + } + public struct VisibleRangeOptions: AWSEncodableShape & AWSDecodableShape { /// The percent range in the visible range.
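A minimal sketch of attaching the ValidationStrategy shape above to one of the create/update requests in this diff. The IDs are placeholders, a real request would also carry a definition or sourceEntity, and the createAnalysis client operation is assumed from the existing generated API rather than shown here.

```swift
import SotoQuickSight

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let quickSight = QuickSight(client: client, region: .useast1)

// Relax definition validation so the analysis can be saved with known, non-blocking errors.
let request = QuickSight.CreateAnalysisRequest(
    analysisId: "sales-analysis",          // placeholder IDs
    awsAccountId: "123456789012",
    // definition: myAnalysisDefinition,   // a definition or sourceEntity is supplied in practice
    name: "Sales Analysis",
    validationStrategy: QuickSight.ValidationStrategy(mode: .lenient)  // use .strict to enforce every error
)

_ = try await quickSight.createAnalysis(request)  // operation assumed, not part of this diff
```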
public let percentRange: PercentVisibleRange? diff --git a/Sources/Soto/Services/RDS/RDS_api+async.swift b/Sources/Soto/Services/RDS/RDS_api+async.swift index 6a3c3df4c4..cc45b5f9ba 100644 --- a/Sources/Soto/Services/RDS/RDS_api+async.swift +++ b/Sources/Soto/Services/RDS/RDS_api+async.swift @@ -121,7 +121,7 @@ extension RDS { return try await self.client.execute(operation: "CreateDBInstance", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. Call the CreateDBInstance operation to create a DB instance for an Aurora DB cluster. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled. + /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled. public func createDBInstanceReadReplica(_ input: CreateDBInstanceReadReplicaMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateDBInstanceReadReplicaResult { return try await self.client.execute(operation: "CreateDBInstanceReadReplica", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -206,7 +206,7 @@ extension RDS { return try await self.client.execute(operation: "DeleteDBClusterSnapshot", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted. If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted. 
When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter. If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true: The DB cluster is a read replica of another Amazon Aurora DB cluster. The DB instance is the only instance in the DB cluster. To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a read replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster. + /// Deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. However, manual DB snapshots of the DB instance aren't deleted. If you request a final DB snapshot, the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. This operation can't be canceled or reverted after it begins. To monitor the status of this operation, use DescribeDBInstance. When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter. If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true: The DB cluster is a read replica of another Amazon Aurora DB cluster. The DB instance is the only instance in the DB cluster. To delete a DB instance in this case, first use the PromoteReadReplicaDBCluster operation to promote the DB cluster so that it's no longer a read replica. After the promotion completes, use the DeleteDBInstance operation to delete the final instance in the DB cluster. For RDS Custom DB instances, deleting the DB instance permanently deletes the EC2 instance and the associated EBS volumes. Make sure that you don't terminate or delete these resources before you delete the DB instance. Otherwise, deleting the DB instance and creation of the final snapshot might fail. public func deleteDBInstance(_ input: DeleteDBInstanceMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteDBInstanceResult { return try await self.client.execute(operation: "DeleteDBInstance", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -321,7 +321,7 @@ extension RDS { return try await self.client.execute(operation: "DescribeDBClusters", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns a list of the available DB engines. + /// Describes the properties of specific versions of DB engines. public func describeDBEngineVersions(_ input: DescribeDBEngineVersionsMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DBEngineVersionMessage { return try await self.client.execute(operation: "DescribeDBEngineVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -944,7 +944,7 @@ extension RDS { ) } - /// Returns a list of the available DB engines. + /// Describes the properties of specific versions of DB engines. 
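A hedged sketch matching the reworked DeleteDBInstance description above. The dbInstanceIdentifier, finalDBSnapshotIdentifier, and skipFinalSnapshot members of DeleteDBInstanceMessage, and the dbInstance/dbInstanceStatus result fields, are assumed from the RDS model rather than shown in this diff.

```swift
import SotoRDS

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let rds = RDS(client: client, region: .useast1)

// Delete an instance and keep a final snapshot. Automated backups are removed,
// but manual snapshots (including this final one) survive.
let result = try await rds.deleteDBInstance(.init(
    dbInstanceIdentifier: "reporting-db",            // assumed member name
    finalDBSnapshotIdentifier: "reporting-db-final", // omit when skipping the final snapshot
    skipFinalSnapshot: false                         // must be true for failed/incompatible-* instances
))
print(result.dbInstance?.dbInstanceStatus ?? "deleting")
```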
/// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift index acc730130a..9bf2ab041f 100644 --- a/Sources/Soto/Services/RDS/RDS_api.swift +++ b/Sources/Soto/Services/RDS/RDS_api.swift @@ -175,7 +175,7 @@ public struct RDS: AWSService { return self.client.execute(operation: "CreateDBInstance", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. Call the CreateDBInstance operation to create a DB instance for an Aurora DB cluster. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled. + /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled. public func createDBInstanceReadReplica(_ input: CreateDBInstanceReadReplicaMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateDBInstanceReadReplica", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -260,7 +260,7 @@ public struct RDS: AWSService { return self.client.execute(operation: "DeleteDBClusterSnapshot", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted. If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted. 
When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter. If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true: The DB cluster is a read replica of another Amazon Aurora DB cluster. The DB instance is the only instance in the DB cluster. To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a read replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster. + /// Deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. However, manual DB snapshots of the DB instance aren't deleted. If you request a final DB snapshot, the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. This operation can't be canceled or reverted after it begins. To monitor the status of this operation, use DescribeDBInstance. When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter. If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true: The DB cluster is a read replica of another Amazon Aurora DB cluster. The DB instance is the only instance in the DB cluster. To delete a DB instance in this case, first use the PromoteReadReplicaDBCluster operation to promote the DB cluster so that it's no longer a read replica. After the promotion completes, use the DeleteDBInstance operation to delete the final instance in the DB cluster. For RDS Custom DB instances, deleting the DB instance permanently deletes the EC2 instance and the associated EBS volumes. Make sure that you don't terminate or delete these resources before you delete the DB instance. Otherwise, deleting the DB instance and creation of the final snapshot might fail. public func deleteDBInstance(_ input: DeleteDBInstanceMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteDBInstance", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -375,7 +375,7 @@ public struct RDS: AWSService { return self.client.execute(operation: "DescribeDBClusters", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns a list of the available DB engines. + /// Describes the properties of specific versions of DB engines. public func describeDBEngineVersions(_ input: DescribeDBEngineVersionsMessage, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeDBEngineVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -1285,7 +1285,7 @@ extension RDS { ) } - /// Returns a list of the available DB engines. + /// Describes the properties of specific versions of DB engines. 
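A small sketch of the DescribeDBEngineVersions call whose summary was reworded above. The engine/engineVersion request members and the dbEngineVersions result field are assumed from the RDS model and are not shown in this diff; the engine values are placeholders.

```swift
import SotoRDS

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let rds = RDS(client: client, region: .useast1)

// Describe the properties of one specific engine/version pair.
let reply = try await rds.describeDBEngineVersions(.init(
    engine: "aurora-postgresql",
    engineVersion: "15.3"
))
for version in reply.dbEngineVersions ?? [] {
    print(version.engineVersion ?? "?", version.dbEngineVersionDescription ?? "")
}
// The generated paginator helpers can be used instead when listing every version.
```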
/// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index b7a365f192..5559d51415 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -346,9 +346,9 @@ extension RDS { } public struct ApplyPendingMaintenanceActionMessage: AWSEncodableShape { - /// The pending maintenance action to apply to this resource. Valid values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation + /// The pending maintenance action to apply to this resource. Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation public let applyAction: String - /// A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone. Valid values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. + /// A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone. Valid Values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. public let optInType: String /// The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN). public let resourceIdentifier: String @@ -458,9 +458,9 @@ extension RDS { public let backtrackTo: Date /// The DB cluster identifier of the DB cluster to be backtracked. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster1 public let dbClusterIdentifier: String - /// A value that indicates whether to force the DB cluster to backtrack when binary logging is enabled. Otherwise, an error occurs when binary logging is enabled. + /// Specifies whether to force the DB cluster to backtrack when binary logging is enabled. Otherwise, an error occurs when binary logging is enabled. public let force: Bool? - /// A value that indicates whether to backtrack the DB cluster to the earliest possible backtrack time when BacktrackTo is set to a timestamp earlier than the earliest backtrack time. When this parameter is disabled and BacktrackTo is set to a timestamp earlier than the earliest backtrack time, an error occurs. + /// Specifies whether to backtrack the DB cluster to the earliest possible backtrack time when BacktrackTo is set to a timestamp earlier than the earliest backtrack time. When this parameter is disabled and BacktrackTo is set to a timestamp earlier than the earliest backtrack time, an error occurs. public let useEarliestTimeOnPointInTimeUnavailable: Bool? public init(backtrackTo: Date, dbClusterIdentifier: String, force: Bool? = nil, useEarliestTimeOnPointInTimeUnavailable: Bool? = nil) { @@ -572,7 +572,7 @@ extension RDS { public let certificateIdentifier: String? /// The type of the certificate. 
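For orientation on the BacktrackDBClusterMessage doc tweaks above, a sketch that uses the initializer shown in this hunk. The backtrackDBCluster client operation and its status result field are assumed, and the cluster identifier is a placeholder.

```swift
import Foundation
import SotoRDS

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let rds = RDS(client: client, region: .useast1)

// Rewind an Aurora MySQL cluster by one hour. `force` backtracks even with binary
// logging enabled; the other flag clamps to the earliest available backtrack time.
let message = RDS.BacktrackDBClusterMessage(
    backtrackTo: Date().addingTimeInterval(-3600),
    dbClusterIdentifier: "my-cluster1",
    force: false,
    useEarliestTimeOnPointInTimeUnavailable: true
)
let backtrack = try await rds.backtrackDBCluster(message)  // client operation assumed
print(backtrack.status ?? "pending")
```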
public let certificateType: String? - /// Whether there is an override for the default certificate identifier. + /// Indicates whether there is an override for the default certificate identifier. public let customerOverride: Bool? /// If there is an override for the default certificate identifier, when the override expires. public let customerOverrideValidTill: Date? @@ -692,7 +692,7 @@ extension RDS { public let dbClusterIdentifier: String? /// The database engine version. public let engineVersion: String? - /// A value that indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. + /// Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. public let iamDatabaseAuthenticationEnabled: Bool? /// The Provisioned IOPS (I/O operations per second) value. This setting is only for non-Aurora Multi-AZ DB clusters. public let iops: Int? @@ -728,13 +728,13 @@ extension RDS { } public struct ConnectionPoolConfiguration: AWSEncodableShape { - /// The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions. Default: 120 Constraints: between 1 and 3600, or 0 representing unlimited + /// The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions. For an unlimited wait time, specify 0. Default: 120 Constraints: Must be between 0 and 3600. public let connectionBorrowTimeout: Int? /// One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2. Default: no initialization query public let initQuery: String? - /// The maximum size of the connection pool for each target in a target group. The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. If you specify MaxIdleConnectionsPercent, then you must also include a value for this parameter. Default: 10 for RDS for Microsoft SQL Server, and 100 for all other engines Constraints: Must be between 1 and 100. + /// The maximum size of the connection pool for each target in a target group. The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. If you specify MaxIdleConnectionsPercent, then you must also include a value for this parameter. Default: 10 for RDS for Microsoft SQL Server, and 100 for all other engines Constraints: Must be between 1 and 100. public let maxConnectionsPercent: Int? - /// Controls how actively the proxy closes idle database connections in the connection pool. The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. With a high value, the proxy leaves a high percentage of idle database connections open. A low value causes the proxy to close more idle connections and return them to the database. 
If you specify this parameter, then you must also include a value for MaxConnectionsPercent. Default: The default value is half of the value of MaxConnectionsPercent. For example, if MaxConnectionsPercent is 80, then the default value of MaxIdleConnectionsPercent is 40. If the value of MaxConnectionsPercent isn't specified, then for SQL Server, MaxIdleConnectionsPercent is 5, and for all other engines, the default is 50. Constraints: Must be between 0 and the value of MaxConnectionsPercent. + /// A value that controls how actively the proxy closes idle database connections in the connection pool. The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. With a high value, the proxy leaves a high percentage of idle database connections open. A low value causes the proxy to close more idle connections and return them to the database. If you specify this parameter, then you must also include a value for MaxConnectionsPercent. Default: The default value is half of the value of MaxConnectionsPercent. For example, if MaxConnectionsPercent is 80, then the default value of MaxIdleConnectionsPercent is 40. If the value of MaxConnectionsPercent isn't specified, then for SQL Server, MaxIdleConnectionsPercent is 5, and for all other engines, the default is 50. Constraints: Must be between 0 and the value of MaxConnectionsPercent. public let maxIdleConnectionsPercent: Int? /// Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Default: no session pinning filters @OptionalCustomCoding @@ -829,7 +829,7 @@ extension RDS { public struct CopyDBClusterSnapshotMessage: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// A value that indicates whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. By default, tags are not copied. + /// Specifies whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. By default, tags are not copied. public let copyTags: Bool? /// The Amazon Web Services KMS key identifier for an encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS key. If you copy an encrypted DB cluster snapshot from your Amazon Web Services account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot. If you copy an encrypted DB cluster snapshot that is shared from another Amazon Web Services account, then you must specify a value for KmsKeyId. To copy an encrypted DB cluster snapshot to another Amazon Web Services Region, you must set KmsKeyId to the Amazon Web Services KMS key identifier you want to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region. 
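To ground the connection-pool documentation changes above, a sketch that assumes ConnectionPoolConfiguration has the usual generated memberwise initializer (only the property names and constraints appear in this diff); the values respect the documented ranges.

```swift
import SotoRDS

// RDS Proxy target-group pool settings (memberwise init assumed from the property list).
let pool = RDS.ConnectionPoolConfiguration(
    connectionBorrowTimeout: 120,        // seconds; 0 means wait indefinitely
    initQuery: "SET time_zone = '+00:00'",
    maxConnectionsPercent: 80,           // % of max_connections, 1-100
    maxIdleConnectionsPercent: 40,       // must be <= maxConnectionsPercent
    sessionPinningFilters: ["EXCLUDE_VARIABLE_SETS"]
)
// Typically passed to a ModifyDBProxyTargetGroup request (not part of this diff).
print(pool.maxConnectionsPercent ?? 100)
```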
If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned. public let kmsKeyId: String? @@ -915,9 +915,9 @@ extension RDS { public struct CopyDBSnapshotMessage: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// A value that indicates whether to copy the DB option group associated with the source DB snapshot to the target Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with cross-account snapshot copy calls. + /// Specifies whether to copy the DB option group associated with the source DB snapshot to the target Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with cross-account snapshot copy calls. public let copyOptionGroup: Bool? - /// A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags aren't copied. + /// Specifies whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags aren't copied. public let copyTags: Bool? /// The Amazon Web Services KMS key identifier for an encrypted DB snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you copy an encrypted DB snapshot from your Amazon Web Services account, you can specify a value for this parameter to encrypt the copy with a new KMS key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same Amazon Web Services KMS key as the source DB snapshot. If you copy an encrypted DB snapshot that is shared from another Amazon Web Services account, then you must specify a value for this parameter. If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted. If you copy an encrypted snapshot to a different Amazon Web Services Region, then you must specify an Amazon Web Services KMS key identifier for the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region. public let kmsKeyId: String? @@ -1544,6 +1544,8 @@ extension RDS { public let dbSubnetGroupName: String? /// The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term "Oracle database instance" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle SID is also the name of your CDB. public let dbSystemId: String? + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? /// Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. This setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster. public let deletionProtection: Bool? /// The Active Directory directory ID to create the DB instance in. 
Currently, only Microsoft SQL Server, MySQL, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to the following DB instances: Amazon Aurora (The domain is managed by the DB cluster.) RDS Custom @@ -1582,7 +1584,7 @@ extension RDS { public let manageMasterUserPassword: Bool? /// The name for the master user. This setting doesn't apply to Amazon Aurora DB instances. The name for the master user is managed by the DB cluster. This setting is required for RDS DB instances. Constraints: Must be 1 to 16 letters, numbers, or underscores. First character must be a letter. Can't be a reserved word for the chosen database engine. public let masterUsername: String? - /// The password for the master user. This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster. Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include any printable ASCII character except "/", """, or "@". Length Constraints: RDS for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. + /// The password for the master user. This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster. Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include any printable ASCII character except "/", """, or "@". For RDS for Oracle, can't include the "&" (ampersand) or the "'" (single quotes) character. Length Constraints: RDS for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. public let masterUserPassword: String? /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. public let masterUserSecretKmsKeyId: String? @@ -1636,7 +1638,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? 
= nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceClass: String, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, timezone: String? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceClass: String, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? 
= nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, timezone: String? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -1654,6 +1656,7 @@ extension RDS { self.dbSecurityGroups = dbSecurityGroups self.dbSubnetGroupName = dbSubnetGroupName self.dbSystemId = dbSystemId + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.domain = domain self.domainAuthSecretArn = domainAuthSecretArn @@ -1717,6 +1720,7 @@ extension RDS { case dbSecurityGroups = "DBSecurityGroups" case dbSubnetGroupName = "DBSubnetGroupName" case dbSystemId = "DBSystemId" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case domain = "Domain" case domainAuthSecretArn = "DomainAuthSecretArn" @@ -1770,25 +1774,27 @@ extension RDS { /// The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? - /// A value that indicates whether minor engine upgrades are applied automatically to the read replica during the maintenance window. This setting doesn't apply to RDS Custom. Default: Inherits from the source DB instance + /// Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window. This setting doesn't apply to RDS Custom DB instances. Default: Inherits the value from the source DB instance. public let autoMinorVersionUpgrade: Bool? /// The Availability Zone (AZ) where the read replica will be created. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d public let availabilityZone: String? - /// A value that indicates whether to copy all tags from the read replica to snapshots of the read replica. By default, tags are not copied. + /// Specifies whether to copy all tags from the read replica to snapshots of the read replica. By default, tags aren't copied. public let copyTagsToSnapshot: Bool? - /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom. + /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances. public let customIamInstanceProfile: String? 
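// Usage sketch: a minimal CreateDBInstanceMessage that sets the DedicatedLogVolume flag added
// to this shape in the hunks above. The client setup, identifiers, engine, and storage values
// below are placeholders (assumptions), not taken from this diff; adapt them to your account.
import SotoRDS

func createInstanceWithDedicatedLogVolume() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let rds = RDS(client: client, region: .useast1)

    let request = RDS.CreateDBInstanceMessage(
        allocatedStorage: 100,
        dbInstanceClass: "db.m5.large",            // placeholder instance class
        dbInstanceIdentifier: "example-instance",  // placeholder identifier
        dedicatedLogVolume: true,                  // new optional flag introduced in this diff
        engine: "postgres",
        iops: 3000,                                // placeholder storage settings
        manageMasterUserPassword: true,            // let RDS manage the password in Secrets Manager
        masterUsername: "dbadmin",
        storageType: "io1"
    )
    _ = try await rds.createDBInstance(request)
}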
- /// The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits from the source DB instance. + /// The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance. public let dbInstanceClass: String? /// The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. public let dbInstanceIdentifier: String - /// The name of the DB parameter group to associate with this DB instance. If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. It isn't supported for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens + /// The name of the DB parameter group to associate with this DB instance. If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. public let dbParameterGroupName: String? - /// Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC. Constraints: If supplied, must match the name of an existing DBSubnetGroup. The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running. All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC. Not specify a DB subnet group. All these read replicas are created outside of any VPC. Example: mydbsubnetgroup + /// A DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC. Constraints: If supplied, must match the name of an existing DB subnet group. The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC. Not specify a DB subnet group. All these read replicas are created outside of any VPC. Example: mydbsubnetgroup public let dbSubnetGroupName: String? - /// A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? + /// Specifies whether to enable deletion protection for the DB instance. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? - /// The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + /// The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. public let domain: String? /// The ARN for the Secrets Manager secret with the credentials for the user joining the domain. Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 public let domainAuthSecretArn: String? @@ -1797,47 +1803,47 @@ extension RDS { public var domainDnsIps: [String]? /// The fully qualified domain name (FQDN) of an Active Directory domain. Constraints: Can't be longer than 64 characters. Example: mymanagedADtest.mymanagedAD.mydomain public let domainFqdn: String? - /// The name of the IAM role to be used when making API calls to the Directory Service. This setting doesn't apply to RDS Custom. + /// The name of the IAM role to use when making API calls to the Directory Service. This setting doesn't apply to RDS Custom DB instances. public let domainIAMRoleName: String? /// The Active Directory organizational unit for your DB instance to join. Constraints: Must be in the distinguished name format. Can't be longer than 64 characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain public let domainOu: String? - /// The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + /// The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? 
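// Usage sketch: CreateDBInstanceReadReplicaMessage gains the same DedicatedLogVolume flag in the
// hunk above. A minimal request, assuming an already configured RDS service object and
// placeholder identifiers:
import SotoRDS

func createReplicaWithDedicatedLogVolume(rds: RDS) async throws {
    let request = RDS.CreateDBInstanceReadReplicaMessage(
        copyTagsToSnapshot: true,
        dbInstanceIdentifier: "example-replica",        // placeholder replica identifier
        dedicatedLogVolume: true,                       // new optional flag introduced in this diff
        sourceDBInstanceIdentifier: "example-instance"  // placeholder source instance
    )
    _ = try await rds.createDBInstanceReadReplica(request)
}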
- /// A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts read replica. A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the read replica from outside of its virtual private cloud (VPC) on your local network. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. + /// Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts read replica. A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the read replica from outside of its virtual private cloud (VPC) on your local network. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. public let enableCustomerOwnedIp: Bool? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. public let enableIAMDatabaseAuthentication: Bool? - /// A value that indicates whether to enable Performance Insights for the read replica. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + /// Specifies whether to enable Performance Insights for the read replica. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. public let enablePerformanceInsights: Bool? - /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. + /// The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. public let iops: Int? /// The Amazon Web Services KMS key identifier for an encrypted read replica. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you create an encrypted read replica in the same Amazon Web Services Region as the source DB instance or Multi-AZ DB cluster, don't specify a value for this parameter. A read replica in the same Amazon Web Services Region is always encrypted with the same KMS key as the source DB instance or cluster. If you create an encrypted read replica in a different Amazon Web Services Region, then you must specify a KMS key identifier for the destination Amazon Web Services Region. 
KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region. You can't create an encrypted read replica from an unencrypted DB instance or Multi-AZ DB cluster. This setting doesn't apply to RDS Custom, which uses the same KMS key as the primary replica. public let kmsKeyId: String? /// The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. public let maxAllocatedStorage: Int? - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. Valid Values: 0, 1, 5, 10, 15, 30, 60 + /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom DB instances. Valid Values: 0, 1, 5, 10, 15, 30, 60 Default: 0 public let monitoringInterval: Int? - /// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. This setting doesn't apply to RDS Custom. + /// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. This setting doesn't apply to RDS Custom DB instances. public let monitoringRoleArn: String? - /// A value that indicates whether the read replica is in a Multi-AZ deployment. You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your read replica as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ DB instance or a Multi-AZ DB cluster. This setting doesn't apply to RDS Custom. + /// Specifies whether the read replica is in a Multi-AZ deployment. You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your read replica as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ DB instance or a Multi-AZ DB cluster. This setting doesn't apply to RDS Custom DB instances. public let multiAZ: Bool? - /// The network type of the DB instance. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for read replica. 
A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. + /// The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for read replica. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. public let networkType: String? - /// The option group the DB instance is associated with. If omitted, the option group associated with the source instance or cluster is used. For SQL Server, you must use the option group associated with the source. This setting doesn't apply to RDS Custom. + /// The option group to associate the DB instance with. If not specified, RDS uses the option group associated with the source DB instance or cluster. For SQL Server, you must use the option group associated with the source. This setting doesn't apply to RDS Custom DB instances. public let optionGroupName: String? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. This setting doesn't apply to RDS Custom. + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. This setting doesn't apply to RDS Custom DB instances. public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. The default is 7 days. The following values are valid: 7 month * 31, where month is a number of months from 1-23 731 For example, the following values are valid: 93 (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a retention period such as 94, which isn't a valid value, RDS issues an error. This setting doesn't apply to RDS Custom. + /// The number of days to retain Performance Insights data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS returns an error. public let performanceInsightsRetentionPeriod: Int? - /// The port number that the DB instance uses for connections. Default: Inherits from the source DB instance Valid Values: 1150-65535 + /// The port number that the DB instance uses for connections. Valid Values: 1150-65535 Default: Inherits the value from the source DB instance. public let port: Int? 
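// Usage sketch: the Enhanced Monitoring and Performance Insights settings documented above must
// be paired (a nonzero MonitoringInterval requires MonitoringRoleArn), and the retention period
// must be 7, month * 31, or 731. The role ARN and identifiers are placeholders; assumes a
// configured RDS service object.
import SotoRDS

func createMonitoredReplica(rds: RDS) async throws {
    let request = RDS.CreateDBInstanceReadReplicaMessage(
        dbInstanceIdentifier: "example-replica-monitored",
        enablePerformanceInsights: true,
        monitoringInterval: 60,                                        // requires monitoringRoleArn
        monitoringRoleArn: "arn:aws:iam::123456789012:role/emaccess",  // placeholder role ARN
        performanceInsightsRetentionPeriod: 3 * 31,                    // 93 days, a valid month * 31 value
        sourceDBInstanceIdentifier: "example-instance"
    )
    _ = try await rds.createDBInstanceReadReplica(request)
}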
- /// When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process. If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas. This setting doesn't apply to RDS Custom. 
+ /// When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process. If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas. This setting doesn't apply to RDS Custom DB instances. public let preSignedUrl: String? 
- /// The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. This setting doesn't apply to RDS Custom. + /// The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. This setting doesn't apply to RDS Custom DB instances. @OptionalCustomCoding> public var processorFeatures: [ProcessorFeature]? - /// A value that indicates whether the DB instance is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. + /// Specifies whether the DB instance is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. public let publiclyAccessible: Bool? /// The open mode of the replica database: mounted or read-only. This parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide. For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually. public let replicaMode: ReplicaMode? @@ -1845,19 +1851,19 @@ extension RDS { public let sourceDBClusterIdentifier: String? /// The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five. Constraints: Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance. Can't be specified if the SourceDBClusterIdentifier parameter is also specified. For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide. For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide. The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0. 
If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier. If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas. public let sourceDBInstanceIdentifier: String? - /// Specifies the storage throughput value for the read replica. This setting doesn't apply to RDS Custom or Amazon Aurora. + /// Specifies the storage throughput value for the read replica. This setting doesn't apply to RDS Custom or Amazon Aurora DB instances. public let storageThroughput: Int? - /// Specifies the storage type to be associated with the read replica. Valid values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise gp2 + /// The storage type to associate with the read replica. If you specify io1 or gp3, you must also include a value for the Iops parameter. Valid Values: gp2 | gp3 | io1 | standard Default: io1 if the Iops parameter is specified. Otherwise, gp2. public let storageType: String? @OptionalCustomCoding> public var tags: [Tag]? - /// A value that indicates whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom. + /// Specifies whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom DB instances. public let useDefaultProcessorFeatures: Bool? - /// A list of Amazon EC2 VPC security groups to associate with the read replica. This setting doesn't apply to RDS Custom. Default: The default EC2 VPC security group for the DB subnet group's VPC. + /// A list of Amazon EC2 VPC security groups to associate with the read replica. This setting doesn't apply to RDS Custom DB instances. Default: The default EC2 VPC security group for the DB subnet group's VPC. @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, iops: Int? = nil, kmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preSignedUrl: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, replicaMode: ReplicaMode? = nil, sourceDBClusterIdentifier: String? = nil, sourceDBInstanceIdentifier: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? 
= nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, iops: Int? = nil, kmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preSignedUrl: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, replicaMode: ReplicaMode? = nil, sourceDBClusterIdentifier: String? = nil, sourceDBInstanceIdentifier: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -1867,6 +1873,7 @@ extension RDS { self.dbInstanceIdentifier = dbInstanceIdentifier self.dbParameterGroupName = dbParameterGroupName self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.domain = domain self.domainAuthSecretArn = domainAuthSecretArn @@ -1912,6 +1919,7 @@ extension RDS { case dbInstanceIdentifier = "DBInstanceIdentifier" case dbParameterGroupName = "DBParameterGroupName" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case domain = "Domain" case domainAuthSecretArn = "DomainAuthSecretArn" @@ -2021,7 +2029,7 @@ extension RDS { public let dbProxyName: String @OptionalCustomCoding> public var tags: [Tag]? - /// A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations. The default is READ_WRITE. The only role that proxies for RDS for Microsoft SQL Server support is READ_WRITE. + /// The role of the DB proxy endpoint. The role determines whether the endpoint can be used for read/write or only read operations. The default is READ_WRITE. The only role that proxies for RDS for Microsoft SQL Server support is READ_WRITE. public let targetRole: DBProxyEndpointTargetRole? /// The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC. @OptionalCustomCoding @@ -2079,13 +2087,13 @@ extension RDS { public var auth: [UserAuthConfig] /// The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region. 
An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. public let dbProxyName: String - /// Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. + /// Specifies whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. public let debugLogging: Bool? /// The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL. For RDS for Microsoft SQL Server, specify SQLSERVER. public let engineFamily: EngineFamily /// The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database. public let idleClientTimeout: Int? - /// A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. + /// Specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. public let requireTLS: Bool? /// The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in Amazon Web Services Secrets Manager. public let roleArn: String @@ -2257,7 +2265,7 @@ extension RDS { public struct _SourceIdsEncoding: ArrayCoderProperties { public static let member = "SourceId" } public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// A value that indicates whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active. + /// Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active. public let enabled: Bool? /// A list of event categories for a particular source type (SourceType) that you want to subscribe to. You can see a list of the categories for a given source type in the "Amazon RDS event categories and event messages" section of the Amazon RDS User Guide or the Amazon Aurora User Guide. You can also see this list by using the DescribeEventCategories operation. @OptionalCustomCoding @@ -2267,7 +2275,7 @@ extension RDS { /// The list of identifiers of the event sources for which events are returned.
If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens. Constraints: If SourceIds are supplied, SourceType must also be provided. If the source type is a DB instance, a DBInstanceIdentifier value must be supplied. If the source type is a DB cluster, a DBClusterIdentifier value must be supplied. If the source type is a DB parameter group, a DBParameterGroupName value must be supplied. If the source type is a DB security group, a DBSecurityGroupName value must be supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier value must be supplied. If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier value must be supplied. If the source type is an RDS Proxy, a DBProxyName value must be supplied. @OptionalCustomCoding> public var sourceIds: [String]? - /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy public let sourceType: String? /// The name of the subscription. Constraints: The name must be less than 255 characters. public let subscriptionName: String @@ -2512,7 +2520,7 @@ extension RDS { public let engineMode: String? /// The version of the database engine. public let engineVersion: String? - /// Specifies whether write forwarding is enabled for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster. + /// Indicates whether write forwarding is enabled for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster. public let globalWriteForwardingRequested: Bool? /// The status of write forwarding for a secondary cluster in an Aurora global database. public let globalWriteForwardingStatus: WriteForwardingStatus? @@ -2530,7 +2538,7 @@ extension RDS { public let kmsKeyId: String? /// The latest time to which a database can be restored with point-in-time restore. public let latestRestorableTime: Date? - /// Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it. + /// Indicates whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it. public let localWriteForwardingStatus: LocalWriteForwardingStatus? /// The master username for the DB cluster. public let masterUsername: String? @@ -2766,7 +2774,7 @@ extension RDS { public let engineMode: String? 
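// Usage sketch: reading the write-forwarding fields described above from a DescribeDBClusters
// response before relying on write forwarding. The cluster identifier is a placeholder; assumes
// a configured RDS service object.
import SotoRDS

func printWriteForwardingStatus(rds: RDS) async throws {
    let response = try await rds.describeDBClusters(.init(dbClusterIdentifier: "example-cluster"))
    if let cluster = response.dbClusters?.first {
        // Write forwarding takes time to enable, so check for an "enabled" status rather than
        // assuming the request alone is sufficient.
        print("global write forwarding:", cluster.globalWriteForwardingStatus ?? .disabled)
        print("local write forwarding:", cluster.localWriteForwardingStatus ?? .disabled)
    }
}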
/// The version of the database engine for the automated backup. public let engineVersion: String? - /// True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. + /// Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. public let iamDatabaseAuthenticationEnabled: Bool? /// The IOPS (I/O operations per second) value for the automated backup. This setting is only for non-Aurora Multi-AZ DB clusters. public let iops: Int? @@ -2783,7 +2791,7 @@ extension RDS { public let restoreWindow: RestoreWindow? /// A list of status information for an automated backup: retained - Automated backups for deleted clusters. public let status: String? - /// Specifies whether the source DB cluster is encrypted. + /// Indicates whether the source DB cluster is encrypted. public let storageEncrypted: Bool? /// The storage type associated with the DB cluster. This setting is only for non-Aurora Multi-AZ DB clusters. public let storageType: String? @@ -3023,7 +3031,7 @@ extension RDS { public let dbClusterParameterGroupStatus: String? /// Specifies the instance identifier for this member of the DB cluster. public let dbInstanceIdentifier: String? - /// Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise. + /// Indicates whether the cluster member is the primary DB instance for the DB cluster. public let isClusterWriter: Bool? /// A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. public let promotionTier: Int? @@ -3183,56 +3191,56 @@ extension RDS { public struct _AvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "AvailabilityZone" } public struct _TagListEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// Specifies the allocated storage size in gibibytes (GiB). + /// The allocated storage size of the DB cluster snapshot in gibibytes (GiB). public let allocatedStorage: Int? - /// Provides the list of Availability Zones (AZs) where instances in the DB cluster snapshot can be restored. + /// The list of Availability Zones (AZs) where instances in the DB cluster snapshot can be restored. @OptionalCustomCoding> public var availabilityZones: [String]? - /// Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC). + /// The time when the DB cluster was created, in Universal Coordinated Time (UTC). public let clusterCreateTime: Date? - /// Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from. + /// The DB cluster identifier of the DB cluster that this DB cluster snapshot was created from. public let dbClusterIdentifier: String? - /// Specifies the resource ID of the DB cluster that this DB cluster snapshot was created from. + /// The resource ID of the DB cluster that this DB cluster snapshot was created from. public let dbClusterResourceId: String? - /// Specifies the Amazon Resource Name (ARN) for the DB cluster snapshot. + /// The Amazon Resource Name (ARN) for the DB cluster snapshot. public let dbClusterSnapshotArn: String? - /// Specifies the identifier for the DB cluster snapshot. + /// The identifier for the DB cluster snapshot. public let dbClusterSnapshotIdentifier: String? 
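// Usage sketch: filtering DBClusterSnapshot results on the status and StorageEncrypted fields
// documented above. The cluster identifier is a placeholder; assumes a configured RDS service
// object.
import SotoRDS

func availableEncryptedSnapshots(rds: RDS) async throws -> [RDS.DBClusterSnapshot] {
    let response = try await rds.describeDBClusterSnapshots(.init(dbClusterIdentifier: "example-cluster"))
    // Keep only snapshots that are fully created ("available") and encrypted at rest.
    return (response.dbClusterSnapshots ?? []).filter {
        $0.status == "available" && $0.storageEncrypted == true
    }
}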
/// Reserved for future use. public let dbSystemId: String? - /// Specifies the name of the database engine for this DB cluster snapshot. + /// The name of the database engine for this DB cluster snapshot. public let engine: String? - /// Provides the engine mode of the database engine for this DB cluster snapshot. + /// The engine mode of the database engine for this DB cluster snapshot. public let engineMode: String? - /// Provides the version of the database engine for this DB cluster snapshot. + /// The version of the database engine for this DB cluster snapshot. public let engineVersion: String? - /// True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. + /// Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. public let iamDatabaseAuthenticationEnabled: Bool? /// If StorageEncrypted is true, the Amazon Web Services KMS key identifier for the encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. public let kmsKeyId: String? - /// Provides the license model information for this DB cluster snapshot. + /// The license model information for this DB cluster snapshot. public let licenseModel: String? - /// Provides the master username for this DB cluster snapshot. + /// The master username for this DB cluster snapshot. public let masterUsername: String? - /// Specifies the percentage of the estimated data that has been transferred. + /// The percentage of the estimated data that has been transferred. public let percentProgress: Int? - /// Specifies the port that the DB cluster was listening on at the time of the snapshot. + /// The port that the DB cluster was listening on at the time of the snapshot. public let port: Int? - /// Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). + /// The time when the snapshot was taken, in Universal Coordinated Time (UTC). public let snapshotCreateTime: Date? - /// Provides the type of the DB cluster snapshot. + /// The type of the DB cluster snapshot. public let snapshotType: String? /// If the DB cluster snapshot was copied from a source DB cluster snapshot, the Amazon Resource Name (ARN) for the source DB cluster snapshot, otherwise, a null value. public let sourceDBClusterSnapshotArn: String? - /// Specifies the status of this DB cluster snapshot. Valid statuses are the following: available copying creating + /// The status of this DB cluster snapshot. Valid statuses are the following: available copying creating public let status: String? - /// Specifies whether the DB cluster snapshot is encrypted. + /// Indicates whether the DB cluster snapshot is encrypted. public let storageEncrypted: Bool? /// The storage type associated with the DB cluster snapshot. This setting is only for Aurora DB clusters. public let storageType: String? @OptionalCustomCoding> public var tagList: [Tag]? - /// Provides the VPC ID associated with the DB cluster snapshot. + /// The VPC ID associated with the DB cluster snapshot. public let vpcId: String? public init(allocatedStorage: Int? = nil, availabilityZones: [String]? = nil, clusterCreateTime: Date? = nil, dbClusterIdentifier: String? = nil, dbClusterResourceId: String? = nil, dbClusterSnapshotArn: String? = nil, dbClusterSnapshotIdentifier: String? = nil, dbSystemId: String? = nil, engine: String? = nil, engineMode: String? 
= nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, masterUsername: String? = nil, percentProgress: Int? = nil, port: Int? = nil, snapshotCreateTime: Date? = nil, snapshotType: String? = nil, sourceDBClusterSnapshotArn: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) { @@ -3412,17 +3420,17 @@ extension RDS { /// A list of the time zones supported by this engine for the Timezone parameter of the CreateDBInstance action. @OptionalCustomCoding> public var supportedTimezones: [Timezone]? - /// A value that indicates whether the engine version supports Babelfish for Aurora PostgreSQL. + /// Indicates whether the engine version supports Babelfish for Aurora PostgreSQL. public let supportsBabelfish: Bool? - /// A value that indicates whether the engine version supports rotating the server certificate without rebooting the DB instance. + /// Indicates whether the engine version supports rotating the server certificate without rebooting the DB instance. public let supportsCertificateRotationWithoutRestart: Bool? - /// A value that indicates whether you can use Aurora global databases with a specific DB engine version. + /// Indicates whether you can use Aurora global databases with a specific DB engine version. public let supportsGlobalDatabases: Bool? - /// A value that indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only + /// Indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only public let supportsLocalWriteForwarding: Bool? - /// A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs. + /// Indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs. public let supportsLogExportsToCloudwatchLogs: Bool? - /// A value that indicates whether you can use Aurora parallel query with a specific DB engine version. + /// Indicates whether you can use Aurora parallel query with a specific DB engine version. public let supportsParallelQuery: Bool? /// Indicates whether the database engine version supports read replicas. public let supportsReadReplica: Bool? @@ -3609,6 +3617,8 @@ extension RDS { public let dbSubnetGroup: DBSubnetGroup? /// The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is only valid for RDS Custom DB instances. public let dbSystemId: String? + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? /// Indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? /// The Active Directory Domain membership records associated with the DB instance. @@ -3714,7 +3724,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroups: [VpcSecurityGroupMembership]? 
- public init(activityStreamEngineNativeAuditFieldsIncluded: Bool? = nil, activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamPolicyStatus: ActivityStreamPolicyStatus? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBInstanceRole]? = nil, automaticRestartTime: Date? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customerOwnedIpEnabled: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbInstancePort: Int? = nil, dbInstanceStatus: String? = nil, dbiResourceId: String? = nil, dbName: String? = nil, dbParameterGroups: [DBParameterGroupStatus]? = nil, dbSecurityGroups: [DBSecurityGroupMembership]? = nil, dbSubnetGroup: DBSubnetGroup? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: Endpoint? = nil, engine: String? = nil, engineVersion: String? = nil, enhancedMonitoringResourceArn: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, licenseModel: String? = nil, listenerEndpoint: Endpoint? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupMemberships: [OptionGroupMembership]? = nil, pendingModifiedValues: PendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, readReplicaDBClusterIdentifiers: [String]? = nil, readReplicaDBInstanceIdentifiers: [String]? = nil, readReplicaSourceDBClusterIdentifier: String? = nil, readReplicaSourceDBInstanceIdentifier: String? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeTime: Date? = nil, secondaryAvailabilityZone: String? = nil, statusInfos: [DBInstanceStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(activityStreamEngineNativeAuditFieldsIncluded: Bool? = nil, activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamPolicyStatus: ActivityStreamPolicyStatus? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? 
= nil, associatedRoles: [DBInstanceRole]? = nil, automaticRestartTime: Date? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customerOwnedIpEnabled: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbInstancePort: Int? = nil, dbInstanceStatus: String? = nil, dbiResourceId: String? = nil, dbName: String? = nil, dbParameterGroups: [DBParameterGroupStatus]? = nil, dbSecurityGroups: [DBSecurityGroupMembership]? = nil, dbSubnetGroup: DBSubnetGroup? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: Endpoint? = nil, engine: String? = nil, engineVersion: String? = nil, enhancedMonitoringResourceArn: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, licenseModel: String? = nil, listenerEndpoint: Endpoint? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupMemberships: [OptionGroupMembership]? = nil, pendingModifiedValues: PendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, readReplicaDBClusterIdentifiers: [String]? = nil, readReplicaDBInstanceIdentifiers: [String]? = nil, readReplicaSourceDBClusterIdentifier: String? = nil, readReplicaSourceDBInstanceIdentifier: String? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeTime: Date? = nil, secondaryAvailabilityZone: String? = nil, statusInfos: [DBInstanceStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? 
= nil) { self.activityStreamEngineNativeAuditFieldsIncluded = activityStreamEngineNativeAuditFieldsIncluded self.activityStreamKinesisStreamName = activityStreamKinesisStreamName self.activityStreamKmsKeyId = activityStreamKmsKeyId @@ -3749,6 +3759,7 @@ extension RDS { self.dbSecurityGroups = dbSecurityGroups self.dbSubnetGroup = dbSubnetGroup self.dbSystemId = dbSystemId + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.domainMemberships = domainMemberships self.enabledCloudwatchLogsExports = enabledCloudwatchLogsExports @@ -3834,6 +3845,7 @@ extension RDS { case dbSecurityGroups = "DBSecurityGroups" case dbSubnetGroup = "DBSubnetGroup" case dbSystemId = "DBSystemId" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case domainMemberships = "DomainMemberships" case enabledCloudwatchLogsExports = "EnabledCloudwatchLogsExports" @@ -3888,7 +3900,7 @@ extension RDS { public struct DBInstanceAutomatedBackup: AWSDecodableShape { public struct _DBInstanceAutomatedBackupsReplicationsEncoding: ArrayCoderProperties { public static let member = "DBInstanceAutomatedBackupsReplication" } - /// Specifies the allocated storage size in gibibytes (GiB). + /// The allocated storage size for the automated backup in gibibytes (GiB). public let allocatedStorage: Int? /// The Availability Zone that the automated backup was created in. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones. public let availabilityZone: String? @@ -3896,7 +3908,7 @@ extension RDS { public let awsBackupRecoveryPointArn: String? /// The retention period for the automated backups. public let backupRetentionPeriod: Int? - /// Specifies where automated backups are stored: Amazon Web Services Outposts or the Amazon Web Services Region. + /// The location where automated backups are stored: Amazon Web Services Outposts or the Amazon Web Services Region. public let backupTarget: String? /// The Amazon Resource Name (ARN) for the automated backups. public let dbInstanceArn: String? @@ -3909,7 +3921,9 @@ extension RDS { public let dbInstanceIdentifier: String? /// The resource ID for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region. public let dbiResourceId: String? - /// Specifies whether the automated backup is encrypted. + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? + /// Indicates whether the automated backup is encrypted. public let encrypted: Bool? /// The name of the database engine for this automated backup. public let engine: String? @@ -3917,13 +3931,13 @@ extension RDS { public let engineVersion: String? /// True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. public let iamDatabaseAuthenticationEnabled: Bool? - /// Provides the date and time that the DB instance was created. + /// The date and time when the DB instance was created. public let instanceCreateTime: Date? /// The IOPS (I/O operations per second) value for the automated backup. public let iops: Int? /// The Amazon Web Services KMS key ID for an automated backup. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. public let kmsKeyId: String? - /// License model information for the automated backup.
+ /// The license model information for the automated backup. public let licenseModel: String? /// The master user name of an automated backup. public let masterUsername: String? @@ -3933,22 +3947,22 @@ extension RDS { public let port: Int? /// The Amazon Web Services Region associated with the automated backup. public let region: String? - /// Earliest and latest time an instance can be restored to. + /// The earliest and latest time a DB instance can be restored to. public let restoreWindow: RestoreWindow? - /// Provides a list of status information for an automated backup: active - Automated backups for current instances. retained - Automated backups for deleted instances. creating - Automated backups that are waiting for the first automated snapshot to be available. + /// A list of status information for an automated backup: active - Automated backups for current instances. retained - Automated backups for deleted instances. creating - Automated backups that are waiting for the first automated snapshot to be available. public let status: String? - /// Specifies the storage throughput for the automated backup. + /// The storage throughput for the automated backup. public let storageThroughput: Int? - /// Specifies the storage type associated with the automated backup. + /// The storage type associated with the automated backup. public let storageType: String? /// The ARN from the key store with which the automated backup is associated for TDE encryption. public let tdeCredentialArn: String? /// The time zone of the automated backup. In most cases, the Timezone element is empty. Timezone content appears only for Microsoft SQL Server DB instances that were created with a time zone specified. public let timezone: String? - /// Provides the VPC ID associated with the DB instance. + /// The VPC ID associated with the DB instance. public let vpcId: String? - public init(allocatedStorage: Int? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceIdentifier: String? = nil, dbiResourceId: String? = nil, encrypted: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, masterUsername: String? = nil, optionGroupName: String? = nil, port: Int? = nil, region: String? = nil, restoreWindow: RestoreWindow? = nil, status: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcId: String? = nil) { + public init(allocatedStorage: Int? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceIdentifier: String? = nil, dbiResourceId: String? = nil, dedicatedLogVolume: Bool? = nil, encrypted: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, masterUsername: String? 
= nil, optionGroupName: String? = nil, port: Int? = nil, region: String? = nil, restoreWindow: RestoreWindow? = nil, status: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcId: String? = nil) { self.allocatedStorage = allocatedStorage self.availabilityZone = availabilityZone self.awsBackupRecoveryPointArn = awsBackupRecoveryPointArn @@ -3959,6 +3973,7 @@ extension RDS { self.dbInstanceAutomatedBackupsReplications = dbInstanceAutomatedBackupsReplications self.dbInstanceIdentifier = dbInstanceIdentifier self.dbiResourceId = dbiResourceId + self.dedicatedLogVolume = dedicatedLogVolume self.encrypted = encrypted self.engine = engine self.engineVersion = engineVersion @@ -3991,6 +4006,7 @@ extension RDS { case dbInstanceAutomatedBackupsReplications = "DBInstanceAutomatedBackupsReplications" case dbInstanceIdentifier = "DBInstanceIdentifier" case dbiResourceId = "DbiResourceId" + case dedicatedLogVolume = "DedicatedLogVolume" case encrypted = "Encrypted" case engine = "Engine" case engineVersion = "EngineVersion" @@ -4071,7 +4087,7 @@ extension RDS { public let featureName: String? /// The Amazon Resource Name (ARN) of the IAM role that is associated with the DB instance. public let roleArn: String? - /// Describes the state of association between the IAM role and the DB instance. The Status property returns one of the following values: ACTIVE - the IAM role ARN is associated with the DB instance and can be used to access other Amazon Web Services services on your behalf. PENDING - the IAM role ARN is being associated with the DB instance. INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable to assume the IAM role in order to access other Amazon Web Services services on your behalf. + /// Information about the state of association between the IAM role and the DB instance. The Status property returns one of the following values: ACTIVE - the IAM role ARN is associated with the DB instance and can be used to access other Amazon Web Services services on your behalf. PENDING - the IAM role ARN is being associated with the DB instance. INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable to assume the IAM role in order to access other Amazon Web Services services on your behalf. public let status: String? public init(featureName: String? = nil, roleArn: String? = nil, status: String? = nil) { @@ -4090,9 +4106,9 @@ extension RDS { public struct DBInstanceStatusInfo: AWSDecodableShape { /// Details of the error if there is an error for the instance. If the instance isn't in an error state, this value is blank. public let message: String? - /// Boolean value that is true if the instance is operating normally, or false if the instance is in an error state. + /// A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state. public let normal: Bool? - /// Status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated. + /// The status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated. public let status: String? /// This value is currently "read replication." public let statusType: String? 
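As a usage illustration for the fields documented above, the following is a minimal Swift sketch of reading the new DedicatedLogVolume flag together with the DBInstanceStatusInfo values on a decoded DBInstance. The summarize(_:) helper is illustrative only; it assumes the instance value was obtained elsewhere (for example from a DescribeDBInstances call) and is not part of this diff.

import SotoRDS

// Illustrative helper (not part of the generated service code): reports the new
// DedicatedLogVolume flag and any read-replica status entries on a DBInstance.
func summarize(_ instance: RDS.DBInstance) -> String {
    let dlv = (instance.dedicatedLogVolume ?? false) ? "enabled" : "disabled"
    let replication = instance.statusInfos?
        .filter { $0.statusType == "read replication" }
        .compactMap { info -> String? in
            guard let status = info.status else { return nil }
            // `normal` is false when the instance is in an error state; `message` then carries the details.
            return info.normal == false ? "\(status) (\(info.message ?? "no details"))" : status
        }
        .joined(separator: ", ")
    return "dedicated log volume \(dlv); read replication: \(replication ?? "n/a")"
}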
@@ -4217,7 +4233,7 @@ extension RDS { public let dbProxyArn: String? /// The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region. public let dbProxyName: String? - /// Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. + /// Indicates whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. public let debugLogging: Bool? /// The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application. public let endpoint: String? @@ -4290,7 +4306,7 @@ extension RDS { public let dbProxyName: String? /// The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application. public let endpoint: String? - /// A value that indicates whether this endpoint is the default endpoint for the associated DB proxy. Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the DB proxy can be either read/write or read-only. + /// Indicates whether this endpoint is the default endpoint for the associated DB proxy. Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the DB proxy can be either read/write or read-only. public let isDefault: Bool? /// The current status of this DB proxy endpoint. A status of available means the endpoint is ready to handle requests. Other values indicate that you must wait for the endpoint to be ready, or take some action to resolve an issue. public let status: DBProxyEndpointStatus? @@ -4382,7 +4398,7 @@ extension RDS { public let createdDate: Date? /// The identifier for the RDS proxy associated with this target group. public let dbProxyName: String? - /// Whether this target group is the first one used for connection requests by the associated proxy. Because each proxy is currently associated with a single target group, currently this setting is always true. + /// Indicates whether this target group is the first one used for connection requests by the associated proxy. Because each proxy is currently associated with a single target group, currently this setting is always true. public let isDefault: Bool? /// The current status of this target group. A status of available means the target group is correctly associated with a database. Other values indicate that you must wait for the target group to be ready, or take some action to resolve an issue. public let status: String? @@ -4513,13 +4529,15 @@ extension RDS { public let dbSnapshotIdentifier: String? 
/// The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB. public let dbSystemId: String? - /// Specifies whether the DB snapshot is encrypted. + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? + /// Indicates whether the DB snapshot is encrypted. public let encrypted: Bool? /// Specifies the name of the database engine. public let engine: String? /// Specifies the version of the database engine. public let engineVersion: String? - /// True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. + /// Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. public let iamDatabaseAuthenticationEnabled: Bool? /// Specifies the time in Coordinated Universal Time (UTC) when the DB instance, from which the snapshot was taken, was created. public let instanceCreateTime: Date? @@ -4569,7 +4587,7 @@ extension RDS { /// Provides the VPC ID associated with the DB snapshot. public let vpcId: String? - public init(allocatedStorage: Int? = nil, availabilityZone: String? = nil, dbInstanceIdentifier: String? = nil, dbiResourceId: String? = nil, dbSnapshotArn: String? = nil, dbSnapshotIdentifier: String? = nil, dbSystemId: String? = nil, encrypted: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, masterUsername: String? = nil, optionGroupName: String? = nil, originalSnapshotCreateTime: Date? = nil, percentProgress: Int? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, snapshotCreateTime: Date? = nil, snapshotDatabaseTime: Date? = nil, snapshotTarget: String? = nil, snapshotType: String? = nil, sourceDBSnapshotIdentifier: String? = nil, sourceRegion: String? = nil, status: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcId: String? = nil) { + public init(allocatedStorage: Int? = nil, availabilityZone: String? = nil, dbInstanceIdentifier: String? = nil, dbiResourceId: String? = nil, dbSnapshotArn: String? = nil, dbSnapshotIdentifier: String? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, encrypted: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, masterUsername: String? = nil, optionGroupName: String? = nil, originalSnapshotCreateTime: Date? = nil, percentProgress: Int? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, snapshotCreateTime: Date? = nil, snapshotDatabaseTime: Date? = nil, snapshotTarget: String? = nil, snapshotType: String? = nil, sourceDBSnapshotIdentifier: String? = nil, sourceRegion: String? = nil, status: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcId: String? 
= nil) { self.allocatedStorage = allocatedStorage self.availabilityZone = availabilityZone self.dbInstanceIdentifier = dbInstanceIdentifier @@ -4577,6 +4595,7 @@ extension RDS { self.dbSnapshotArn = dbSnapshotArn self.dbSnapshotIdentifier = dbSnapshotIdentifier self.dbSystemId = dbSystemId + self.dedicatedLogVolume = dedicatedLogVolume self.encrypted = encrypted self.engine = engine self.engineVersion = engineVersion @@ -4614,6 +4633,7 @@ extension RDS { case dbSnapshotArn = "DBSnapshotArn" case dbSnapshotIdentifier = "DBSnapshotIdentifier" case dbSystemId = "DBSystemId" + case dedicatedLogVolume = "DedicatedLogVolume" case encrypted = "Encrypted" case engine = "Engine" case engineVersion = "EngineVersion" @@ -4867,11 +4887,11 @@ extension RDS { public struct DeleteDBClusterMessage: AWSEncodableShape { /// The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive. Constraints: Must match an existing DBClusterIdentifier. public let dbClusterIdentifier: String - /// A value that indicates whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. + /// Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. public let deleteAutomatedBackups: Bool? /// The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter Can't end with a hyphen or contain two consecutive hyphens public let finalDBSnapshotIdentifier: String? - /// A value that indicates whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled. + /// Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled. public let skipFinalSnapshot: Bool? public init(dbClusterIdentifier: String, deleteAutomatedBackups: Bool? = nil, finalDBSnapshotIdentifier: String? = nil, skipFinalSnapshot: Bool? = nil) { @@ -4971,11 +4991,11 @@ extension RDS { public struct DeleteDBInstanceMessage: AWSEncodableShape { /// The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive. Constraints: Must match the name of an existing DB instance. public let dbInstanceIdentifier: String - /// A value that indicates whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. 
The default is to remove automated backups immediately after the DB instance is deleted. + /// Specifies whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB instance is deleted. public let deleteAutomatedBackups: Bool? /// The DBSnapshotIdentifier of the new DBSnapshot created when the SkipFinalSnapshot parameter is disabled. If you enable this parameter and also enable SkipFinalShapshot, the command results in an error. This setting doesn't apply to RDS Custom. Constraints: Must be 1 to 255 letters or numbers. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Can't be specified when deleting a read replica. public let finalDBSnapshotIdentifier: String? - /// A value that indicates whether to skip the creation of a final DB snapshot before deleting the instance. If you enable this parameter, RDS doesn't create a DB snapshot. If you don't enable this parameter, RDS creates a DB snapshot before the DB instance is deleted. By default, skip isn't enabled, and the DB snapshot is created. If you don't enable this parameter, you must specify the FinalDBSnapshotIdentifier parameter. When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, RDS can delete the instance only if you enable this parameter. If you delete a read replica or an RDS Custom instance, you must enable this setting. This setting is required for RDS Custom. + /// Specifies whether to skip the creation of a final DB snapshot before deleting the instance. If you enable this parameter, RDS doesn't create a DB snapshot. If you don't enable this parameter, RDS creates a DB snapshot before the DB instance is deleted. By default, skip isn't enabled, and the DB snapshot is created. If you don't enable this parameter, you must specify the FinalDBSnapshotIdentifier parameter. When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, RDS can delete the instance only if you enable this parameter. If you delete a read replica or an RDS Custom instance, you must enable this setting. This setting is required for RDS Custom. public let skipFinalSnapshot: Bool? public init(dbInstanceIdentifier: String, deleteAutomatedBackups: Bool? = nil, finalDBSnapshotIdentifier: String? = nil, skipFinalSnapshot: Bool? = nil) { @@ -5443,7 +5463,7 @@ extension RDS { public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A value that indicates to return only parameters for a specific source. Parameter sources can be engine, service, or customer. + /// A specific source to return parameters for. Valid Values: customer engine service public let source: String? public init(dbClusterParameterGroupName: String, filters: [Filter]? = nil, marker: String? = nil, maxRecords: Int? = nil, source: String? = nil) { @@ -5500,9 +5520,9 @@ extension RDS { /// A filter that specifies one or more DB cluster snapshots to describe. Supported filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). db-cluster-snapshot-id - Accepts DB cluster snapshot identifiers. 
snapshot-type - Accepts types of DB cluster snapshots. engine - Accepts names of database engines. @OptionalCustomCoding> public var filters: [Filter]? - /// A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included. You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action. + /// Specifies whether to include manual DB cluster snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included. You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action. public let includePublic: Bool? - /// A value that indicates whether to include shared manual DB cluster snapshots from other Amazon Web Services accounts that this Amazon Web Services account has been given permission to copy or restore. By default, these snapshots are not included. You can give an Amazon Web Services account permission to restore a manual DB cluster snapshot from another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action. + /// Specifies whether to include shared manual DB cluster snapshots from other Amazon Web Services accounts that this Amazon Web Services account has been given permission to copy or restore. By default, these snapshots are not included. You can give an Amazon Web Services account permission to restore a manual DB cluster snapshot from another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action. public let includeShared: Bool? /// An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? @@ -5571,22 +5591,22 @@ extension RDS { public struct DescribeDBEngineVersionsMessage: AWSEncodableShape { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } - /// The name of a specific DB parameter group family to return details for. Constraints: If supplied, must match an existing DBParameterGroupFamily. + /// The name of a specific DB parameter group family to return details for. Constraints: If supplied, must match an existing DB parameter group family. public let dbParameterGroupFamily: String? - /// A value that indicates whether only the default version of the specified engine or engine and major version combination is returned. + /// Specifies whether to return only the default version of the specified engine or the engine and major version combination. public let defaultOnly: Bool? - /// The database engine to return. Valid Values: aurora-mysql aurora-postgresql custom-oracle-ee mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web + /// The database engine to return version details for. Valid Values: aurora-mysql aurora-postgresql custom-oracle-ee mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web public let engine: String? - /// The database engine version to return. Example: 5.1.49 + /// A specific database engine version to return details for. Example: 5.1.49 public let engineVersion: String? /// A filter that specifies one or more DB engine versions to describe. 
Supported filters: db-parameter-group-family - Accepts parameter groups family names. The results list only includes information about the DB engine versions for these parameter group families. engine - Accepts engine names. The results list only includes information about the DB engine versions for these engines. engine-mode - Accepts DB engine modes. The results list only includes information about the DB engine versions for these engine modes. Valid DB engine modes are the following: global multimaster parallelquery provisioned serverless engine-version - Accepts engine versions. The results list only includes information about the DB engine versions for these engine versions. status - Accepts engine version statuses. The results list only includes information about the DB engine versions for these statuses. Valid statuses are the following: available deprecated @OptionalCustomCoding> public var filters: [Filter]? - /// A value that indicates whether to include engine versions that aren't available in the list. The default is to list only available engine versions. + /// Specifies whether to also list the engine versions that aren't available. The default is to list only available engine versions. public let includeAll: Bool? - /// A value that indicates whether to list the supported character sets for each engine version. If this parameter is enabled and the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version. For RDS Custom, the default is not to list supported character sets. If you set ListSupportedCharacterSets to true, RDS Custom returns no results. + /// Specifies whether to list the supported character sets for each engine version. If this parameter is enabled and the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version. For RDS Custom, the default is not to list supported character sets. If you enable this parameter, RDS Custom returns no results. public let listSupportedCharacterSets: Bool? - /// A value that indicates whether to list the supported time zones for each engine version. If this parameter is enabled and the requested engine supports the TimeZone parameter for CreateDBInstance, the response includes a list of supported time zones for each engine version. For RDS Custom, the default is not to list supported time zones. If you set ListSupportedTimezones to true, RDS Custom returns no results. + /// Specifies whether to list the supported time zones for each engine version. If this parameter is enabled and the requested engine supports the TimeZone parameter for CreateDBInstance, the response includes a list of supported time zones for each engine version. For RDS Custom, the default is not to list supported time zones. If you enable this parameter, RDS Custom returns no results. public let listSupportedTimezones: Bool? /// An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? @@ -6112,9 +6132,9 @@ extension RDS { /// A filter that specifies one or more DB snapshots to describe. Supported filters: db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). db-snapshot-id - Accepts DB snapshot identifiers. 
dbi-resource-id - Accepts identifiers of source DB instances. snapshot-type - Accepts types of DB snapshots. engine - Accepts names of database engines. @OptionalCustomCoding> public var filters: [Filter]? - /// A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included. You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API. This setting doesn't apply to RDS Custom. + /// Specifies whether to include manual DB snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included. You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API. This setting doesn't apply to RDS Custom. public let includePublic: Bool? - /// A value that indicates whether to include shared manual DB cluster snapshots from other Amazon Web Services accounts that this Amazon Web Services account has been given permission to copy or restore. By default, these snapshots are not included. You can give an Amazon Web Services account permission to restore a manual DB snapshot from another Amazon Web Services account by using the ModifyDBSnapshotAttribute API action. This setting doesn't apply to RDS Custom. + /// Specifies whether to include shared manual DB snapshots from other Amazon Web Services accounts that this Amazon Web Services account has been given permission to copy or restore. By default, these snapshots are not included. You can give an Amazon Web Services account permission to restore a manual DB snapshot from another Amazon Web Services account by using the ModifyDBSnapshotAttribute API action. This setting doesn't apply to RDS Custom. public let includeShared: Bool? /// An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? @@ -6262,7 +6282,7 @@ extension RDS { /// This parameter isn't currently supported. @OptionalCustomCoding> public var filters: [Filter]? - /// The type of source that is generating the events. For RDS Proxy events, specify db-proxy. Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + /// The type of source that is generating the events. For RDS Proxy events, specify db-proxy. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy public let sourceType: String? public init(filters: [Filter]? = nil, sourceType: String? = nil) { @@ -6511,7 +6531,7 @@ extension RDS { public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 10000. public let maxRecords: Int? - /// A value that indicates whether to show only VPC or non-VPC offerings. RDS Custom supports only VPC offerings. RDS Custom supports only VPC offerings. If you describe non-VPC offerings for RDS Custom, the output shows VPC offerings. + /// Specifies whether to show only VPC or non-VPC offerings. RDS Custom supports only VPC offerings.
If you describe non-VPC offerings for RDS Custom, the output shows VPC offerings. public let vpc: Bool? public init(availabilityZoneGroup: String? = nil, dbInstanceClass: String? = nil, engine: String, engineVersion: String? = nil, filters: [Filter]? = nil, licenseModel: String? = nil, marker: String? = nil, maxRecords: Int? = nil, vpc: Bool? = nil) { @@ -6583,7 +6603,7 @@ extension RDS { public let marker: String? /// The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A value that indicates whether to show only those reservations that support Multi-AZ. + /// Specifies whether to show only those reservations that support Multi-AZ. public let multiAZ: Bool? /// The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" public let offeringType: String? @@ -6637,7 +6657,7 @@ extension RDS { public let marker: String? /// The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A value that indicates whether to show only those reservations that support Multi-AZ. + /// Specifies whether to show only those reservations that support Multi-AZ. public let multiAZ: Bool? /// The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" public let offeringType: String? @@ -6780,7 +6800,7 @@ extension RDS { } public struct DownloadDBLogFilePortionDetails: AWSDecodableShape { - /// Boolean value that if true, indicates there is more data to be downloaded. + /// A Boolean value that, if true, indicates there is more data to be downloaded. public let additionalDataPending: Bool? /// Entries from the specified log file. public let logFileData: String? @@ -7062,7 +7082,7 @@ extension RDS { } public struct ExportTask: AWSDecodableShape { - /// The data exported from the snapshot or cluster. Valid values are the following: database - Export all the data from a specified database. database.table table-name - Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. database.schema schema-name - Export a database schema of the snapshot or cluster. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. database.schema.table table-name - Export a table of the database schema. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. + /// The data exported from the snapshot or cluster. Valid Values: database - Export all the data from a specified database. database.table table-name - Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. database.schema schema-name - Export a database schema of the snapshot or cluster. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. database.schema.table table-name - Export a table of the database schema. 
This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. @OptionalCustomCoding public var exportOnly: [String]? /// A unique identifier for the snapshot or cluster export task. This ID isn't an identifier for the Amazon S3 bucket where the data is exported. @@ -7075,11 +7095,11 @@ extension RDS { public let kmsKeyId: String? /// The progress of the snapshot or cluster export task as a percentage. public let percentProgress: Int? - /// The Amazon S3 bucket that the snapshot or cluster is exported to. + /// The Amazon S3 bucket where the snapshot or cluster is exported to. public let s3Bucket: String? /// The Amazon S3 bucket prefix that is the file name and path of the exported data. public let s3Prefix: String? - /// The time that the snapshot was created. + /// The time when the snapshot was created. public let snapshotTime: Date? /// The Amazon Resource Name (ARN) of the snapshot or cluster exported to Amazon S3. public let sourceArn: String? @@ -7087,9 +7107,9 @@ extension RDS { public let sourceType: ExportSourceType? /// The progress status of the export task. The status can be one of the following: CANCELED CANCELING COMPLETE FAILED IN_PROGRESS STARTING public let status: String? - /// The time that the snapshot or cluster export task ended. + /// The time when the snapshot or cluster export task ended. public let taskEndTime: Date? - /// The time that the snapshot or cluster export task started. + /// The time when the snapshot or cluster export task started. public let taskStartTime: Date? /// The total amount of data exported, in gigabytes. public let totalExtractedDataInGB: Int? @@ -7156,7 +7176,7 @@ extension RDS { } public struct FailoverDBClusterMessage: AWSEncodableShape { - /// A DB cluster identifier to force a failover for. This parameter isn't case-sensitive. Constraints: Must match the identifier of an existing DBCluster. + /// The identifier of the DB cluster to force a failover for. This parameter isn't case-sensitive. Constraints: Must match the identifier of an existing DB cluster. public let dbClusterIdentifier: String /// The name of the DB instance to promote to the primary DB instance. Specify the DB instance identifier for an Aurora Replica or a Multi-AZ readable standby in the DB cluster, for example mydbcluster-replica1. This setting isn't supported for RDS for MySQL Multi-AZ DB clusters. public let targetDBInstanceIdentifier: String? @@ -7334,9 +7354,9 @@ extension RDS { public struct GlobalClusterMember: AWSDecodableShape { /// The Amazon Resource Name (ARN) for each Aurora DB cluster in the global cluster. public let dbClusterArn: String? - /// Specifies whether a secondary cluster in the global cluster has write forwarding enabled, not enabled, or is in the process of enabling it. + /// The status of write forwarding for a secondary cluster in the global cluster. public let globalWriteForwardingStatus: WriteForwardingStatus? - /// Specifies whether the Aurora DB cluster is the primary cluster (that is, has read-write capability) for the global cluster with which it is associated. + /// Indicates whether the Aurora DB cluster is the primary cluster (that is, has read-write capability) for the global cluster with which it is associated. public let isWriter: Bool? /// The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the global cluster. @OptionalCustomCoding @@ -7382,9 +7402,9 @@ extension RDS { } public struct IPRange: AWSDecodableShape { - /// Specifies the IP range. + /// The IP range. 
public let cidrip: String? - /// Specifies the status of the IP range. Status can be "authorizing", "authorized", "revoking", and "revoked". + /// The status of the IP range. Status can be "authorizing", "authorized", "revoking", and "revoked". public let status: String? public init(cidrip: String? = nil, status: String? = nil) { @@ -7509,7 +7529,7 @@ extension RDS { public struct ModifyCertificatesMessage: AWSEncodableShape { /// The new default certificate identifier to override the current one with. To determine the valid values, use the describe-certificates CLI command or the DescribeCertificates API operation. public let certificateIdentifier: String? - /// A value that indicates whether to remove the override for the default certificate. If the override is removed, the default certificate is the system default. + /// Specifies whether to remove the override for the default certificate. If the override is removed, the default certificate is the system default. public let removeCustomerOverride: Bool? public init(certificateIdentifier: String? = nil, removeCustomerOverride: Bool? = nil) { @@ -7927,6 +7947,8 @@ extension RDS { public var dbSecurityGroups: [String]? /// The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you enable ApplyImmediately. This parameter doesn't apply to RDS Custom DB instances. Constraints: If supplied, must match existing DB subnet group. Example: mydbsubnetgroup public let dbSubnetGroupName: String? + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled. + public let dedicatedLogVolume: Bool? /// Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? /// Specifies whether to remove the DB instance from the Active Directory domain. @@ -8013,7 +8035,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, certificateRotationRestart: Bool? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbParameterGroupName: String? = nil, dbPortNumber: Int? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, disableDomain: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iops: Int? = nil, licenseModel: String? 
= nil, manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, newDBInstanceIdentifier: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeMinutes: Int? = nil, rotateMasterUserPassword: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, certificateRotationRestart: Bool? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbParameterGroupName: String? = nil, dbPortNumber: Int? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, disableDomain: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iops: Int? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, newDBInstanceIdentifier: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeMinutes: Int? = nil, rotateMasterUserPassword: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.allocatedStorage = allocatedStorage self.allowMajorVersionUpgrade = allowMajorVersionUpgrade self.applyImmediately = applyImmediately @@ -8031,6 +8053,7 @@ extension RDS { self.dbPortNumber = dbPortNumber self.dbSecurityGroups = dbSecurityGroups self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.disableDomain = disableDomain self.domain = domain @@ -8098,6 +8121,7 @@ extension RDS { case dbPortNumber = "DBPortNumber" case dbSecurityGroups = "DBSecurityGroups" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case disableDomain = "DisableDomain" case domain = "Domain" @@ -8426,14 +8450,14 @@ extension RDS { public struct ModifyEventSubscriptionMessage: AWSEncodableShape { public struct _EventCategoriesEncoding: ArrayCoderProperties { public static let member = "EventCategory" } - /// A value that indicates whether to activate the subscription. + /// Specifies whether to activate the subscription. public let enabled: Bool? /// A list of event categories for a source type (SourceType) that you want to subscribe to. You can see a list of the categories for a given source type in Events in the Amazon RDS User Guide or by using the DescribeEventCategories operation. @OptionalCustomCoding> public var eventCategories: [String]? /// The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it. public let snsTopicArn: String? - /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy public let sourceType: String? /// The name of the RDS event notification subscription. public let subscriptionName: String @@ -8511,7 +8535,7 @@ extension RDS { public struct ModifyOptionGroupMessage: AWSEncodableShape { public struct _OptionsToIncludeEncoding: ArrayCoderProperties { public static let member = "OptionConfiguration" } - /// A value that indicates whether to apply the change immediately or during the next maintenance window for each instance associated with the option group. + /// Specifies whether to apply the change immediately or during the next maintenance window for each instance associated with the option group. public let applyImmediately: Bool? /// The name of the option group to be modified. Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance public let optionGroupName: String @@ -8566,9 +8590,9 @@ extension RDS { public var optionSettings: [OptionSetting]? /// The version of the option. public let optionVersion: String? 
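Aside: the ModifyDBInstanceMessage hunks above thread the new DedicatedLogVolume flag through the initializer and CodingKeys. As a rough usage sketch only (the client setup, async call, and instance identifier below are assumptions based on Soto's usual generated surface, not part of this diff), a caller could opt an existing instance into a dedicated log volume like this:

```swift
import SotoRDS

// Hedged sketch: client setup, method name, and identifiers are assumptions, not part of this diff.
func enableDedicatedLogVolume() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let rds = RDS(client: client, region: .useast1)

    // "database-1" is a placeholder DB instance identifier.
    let request = RDS.ModifyDBInstanceMessage(
        applyImmediately: true,
        dbInstanceIdentifier: "database-1",
        dedicatedLogVolume: true  // new flag added by this diff
    )
    let response = try await rds.modifyDBInstance(request)
    // The pending change should surface through the new PendingModifiedValues.dedicatedLogVolume field.
    print(response.dbInstance?.pendingModifiedValues?.dedicatedLogVolume ?? false)
}
```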
- /// Indicate if this option is permanent. + /// Indicates whether this option is permanent. public let permanent: Bool? - /// Indicate if this option is persistent. + /// Indicates whether this option is persistent. public let persistent: Bool? /// If required, the port configured for this option to use. public let port: Int? @@ -8720,7 +8744,7 @@ extension RDS { public struct _OptionsConflictsWithEncoding: ArrayCoderProperties { public static let member = "OptionConflictName" } public struct _OptionsDependedOnEncoding: ArrayCoderProperties { public static let member = "OptionName" } - /// Specifies whether the option can be copied across Amazon Web Services accounts. + /// Indicates whether the option can be copied across Amazon Web Services accounts. public let copyableCrossAccount: Bool? /// If the option requires a port, specifies the default port for the option. public let defaultPort: Int? @@ -8750,7 +8774,7 @@ extension RDS { public let permanent: Bool? /// Persistent options can't be removed from an option group while DB instances are associated with the option group. If you disassociate all DB instances from the option group, your can remove the persistent option from the option group. public let persistent: Bool? - /// Specifies whether the option requires a port. + /// Indicates whether the option requires a port. public let portRequired: Bool? /// If true, you must enable the Auto Minor Version Upgrade setting for your DB instance before you can use this option. You can enable Auto Minor Version Upgrade when you first create your DB instance, or by modifying your DB instance later. public let requiresAutoMinorEngineVersionUpgrade: Bool? @@ -8809,9 +8833,9 @@ extension RDS { public let applyType: String? /// The default value for the option group option. public let defaultValue: String? - /// Boolean value where true indicates that this option group option can be changed from the default value. + /// Indicates whether this option group option can be changed from the default value. public let isModifiable: Bool? - /// Boolean value where true indicates that a value must be specified for this option setting of the option group option. + /// Indicates whether a value must be specified for this option setting of the option group option. public let isRequired: Bool? /// The minimum DB engine version required for the corresponding allowed value for this option setting. @OptionalCustomCoding> @@ -8894,9 +8918,9 @@ extension RDS { public let defaultValue: String? /// The description of the option setting. public let description: String? - /// Indicates if the option setting is part of a collection. + /// Indicates whether the option setting is part of a collection. public let isCollection: Bool? - /// A Boolean value that, when true, indicates the option setting can be modified from the default. + /// Indicates whether the option setting can be modified from the default. public let isModifiable: Bool? /// The name of the option that has settings that you can set. public let name: String? @@ -8929,7 +8953,7 @@ extension RDS { } public struct OptionVersion: AWSDecodableShape { - /// True if the version is the default version of the option, and otherwise false. + /// Indicates whether the version is the default version of the option. public let isDefault: Bool? /// The version of the option. public let version: String? @@ -8987,11 +9011,11 @@ extension RDS { public let minStorageThroughputPerIops: Double? /// Indicates whether a DB instance is Multi-AZ capable. 
public let multiAZCapable: Bool? - /// Whether a DB instance supports RDS on Outposts. For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. + /// Indicates whether a DB instance supports RDS on Outposts. For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. public let outpostCapable: Bool? /// Indicates whether a DB instance can have a read replica. public let readReplicaCapable: Bool? - /// Indicates the storage type for a DB instance. + /// The storage type for a DB instance. public let storageType: String? /// The list of supported modes for Database Activity Streams. Aurora PostgreSQL returns the value [sync, async]. Aurora MySQL and RDS for Oracle return [async] only. If Database Activity Streams isn't supported, the return value is an empty list. @OptionalCustomCoding @@ -9002,21 +9026,23 @@ extension RDS { /// The network types supported by the DB instance (IPV4 or DUAL). A DB instance can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. @OptionalCustomCoding public var supportedNetworkTypes: [String]? - /// Whether DB instances can be configured as a Multi-AZ DB cluster. For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. + /// Indicates whether DB instances can be configured as a Multi-AZ DB cluster. For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide. public let supportsClusters: Bool? + /// Indicates whether a DB instance supports using a dedicated log volume (DLV). + public let supportsDedicatedLogVolume: Bool? /// Indicates whether a DB instance supports Enhanced Monitoring at intervals from 1 to 60 seconds. public let supportsEnhancedMonitoring: Bool? - /// A value that indicates whether you can use Aurora global databases with a specific combination of other DB engine attributes. + /// Indicates whether you can use Aurora global databases with a specific combination of other DB engine attributes. public let supportsGlobalDatabases: Bool? /// Indicates whether a DB instance supports IAM database authentication. public let supportsIAMDatabaseAuthentication: Bool? /// Indicates whether a DB instance supports provisioned IOPS. public let supportsIops: Bool? - /// Whether a DB instance supports Kerberos Authentication. + /// Indicates whether a DB instance supports Kerberos Authentication. public let supportsKerberosAuthentication: Bool? - /// True if a DB instance supports Performance Insights, otherwise false. + /// Indicates whether a DB instance supports Performance Insights. public let supportsPerformanceInsights: Bool? - /// Whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class. + /// Indicates whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class. public let supportsStorageAutoscaling: Bool? /// Indicates whether a DB instance supports encrypted storage. public let supportsStorageEncryption: Bool? @@ -9025,7 +9051,7 @@ extension RDS { /// Indicates whether a DB instance is in a VPC. public let vpc: Bool? - public init(availabilityZoneGroup: String? = nil, availabilityZones: [AvailabilityZone]? 
= nil, availableProcessorFeatures: [AvailableProcessorFeature]? = nil, dbInstanceClass: String? = nil, engine: String? = nil, engineVersion: String? = nil, licenseModel: String? = nil, maxIopsPerDbInstance: Int? = nil, maxIopsPerGib: Double? = nil, maxStorageSize: Int? = nil, maxStorageThroughputPerDbInstance: Int? = nil, maxStorageThroughputPerIops: Double? = nil, minIopsPerDbInstance: Int? = nil, minIopsPerGib: Double? = nil, minStorageSize: Int? = nil, minStorageThroughputPerDbInstance: Int? = nil, minStorageThroughputPerIops: Double? = nil, multiAZCapable: Bool? = nil, outpostCapable: Bool? = nil, readReplicaCapable: Bool? = nil, storageType: String? = nil, supportedActivityStreamModes: [String]? = nil, supportedEngineModes: [String]? = nil, supportedNetworkTypes: [String]? = nil, supportsClusters: Bool? = nil, supportsEnhancedMonitoring: Bool? = nil, supportsGlobalDatabases: Bool? = nil, supportsIAMDatabaseAuthentication: Bool? = nil, supportsIops: Bool? = nil, supportsKerberosAuthentication: Bool? = nil, supportsPerformanceInsights: Bool? = nil, supportsStorageAutoscaling: Bool? = nil, supportsStorageEncryption: Bool? = nil, supportsStorageThroughput: Bool? = nil, vpc: Bool? = nil) { + public init(availabilityZoneGroup: String? = nil, availabilityZones: [AvailabilityZone]? = nil, availableProcessorFeatures: [AvailableProcessorFeature]? = nil, dbInstanceClass: String? = nil, engine: String? = nil, engineVersion: String? = nil, licenseModel: String? = nil, maxIopsPerDbInstance: Int? = nil, maxIopsPerGib: Double? = nil, maxStorageSize: Int? = nil, maxStorageThroughputPerDbInstance: Int? = nil, maxStorageThroughputPerIops: Double? = nil, minIopsPerDbInstance: Int? = nil, minIopsPerGib: Double? = nil, minStorageSize: Int? = nil, minStorageThroughputPerDbInstance: Int? = nil, minStorageThroughputPerIops: Double? = nil, multiAZCapable: Bool? = nil, outpostCapable: Bool? = nil, readReplicaCapable: Bool? = nil, storageType: String? = nil, supportedActivityStreamModes: [String]? = nil, supportedEngineModes: [String]? = nil, supportedNetworkTypes: [String]? = nil, supportsClusters: Bool? = nil, supportsDedicatedLogVolume: Bool? = nil, supportsEnhancedMonitoring: Bool? = nil, supportsGlobalDatabases: Bool? = nil, supportsIAMDatabaseAuthentication: Bool? = nil, supportsIops: Bool? = nil, supportsKerberosAuthentication: Bool? = nil, supportsPerformanceInsights: Bool? = nil, supportsStorageAutoscaling: Bool? = nil, supportsStorageEncryption: Bool? = nil, supportsStorageThroughput: Bool? = nil, vpc: Bool? 
= nil) { self.availabilityZoneGroup = availabilityZoneGroup self.availabilityZones = availabilityZones self.availableProcessorFeatures = availableProcessorFeatures @@ -9051,6 +9077,7 @@ extension RDS { self.supportedEngineModes = supportedEngineModes self.supportedNetworkTypes = supportedNetworkTypes self.supportsClusters = supportsClusters + self.supportsDedicatedLogVolume = supportsDedicatedLogVolume self.supportsEnhancedMonitoring = supportsEnhancedMonitoring self.supportsGlobalDatabases = supportsGlobalDatabases self.supportsIAMDatabaseAuthentication = supportsIAMDatabaseAuthentication @@ -9089,6 +9116,7 @@ extension RDS { case supportedEngineModes = "SupportedEngineModes" case supportedNetworkTypes = "SupportedNetworkTypes" case supportsClusters = "SupportsClusters" + case supportsDedicatedLogVolume = "SupportsDedicatedLogVolume" case supportsEnhancedMonitoring = "SupportsEnhancedMonitoring" case supportsGlobalDatabases = "SupportsGlobalDatabases" case supportsIAMDatabaseAuthentication = "SupportsIAMDatabaseAuthentication" @@ -9150,11 +9178,11 @@ extension RDS { public let isModifiable: Bool? /// The earliest engine version to which the parameter can apply. public let minimumEngineVersion: String? - /// Specifies the name of the parameter. + /// The name of the parameter. public let parameterName: String? - /// Specifies the value of the parameter. + /// The value of the parameter. public let parameterValue: String? - /// Indicates the source of the parameter value. + /// The source of the parameter value. public let source: String? /// The valid DB engine modes. @OptionalCustomCoding @@ -9278,11 +9306,13 @@ extension RDS { public let dbInstanceIdentifier: String? /// The DB subnet group for the DB instance. public let dbSubnetGroupName: String? + /// Indicates whether the DB instance has a dedicated log volume (DLV) enabled.> + public let dedicatedLogVolume: Bool? /// The database engine of the DB instance. public let engine: String? /// The database engine version. public let engineVersion: String? - /// Whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. + /// Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. public let iamDatabaseAuthenticationEnabled: Bool? /// The Provisioned IOPS value for the DB instance. public let iops: Int? @@ -9290,7 +9320,7 @@ extension RDS { public let licenseModel: String? /// The master credentials for the DB instance. public let masterUserPassword: String? - /// A value that indicates that the Single-AZ DB instance will change to a Multi-AZ deployment. + /// Indicates whether the Single-AZ DB instance will change to a Multi-AZ deployment. public let multiAZ: Bool? public let pendingCloudwatchLogsExports: PendingCloudwatchLogsExports? /// The port for the DB instance. @@ -9305,7 +9335,7 @@ extension RDS { /// The storage type of the DB instance. public let storageType: String? - public init(allocatedStorage: Int? = nil, automationMode: AutomationMode? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbSubnetGroupName: String? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, iops: Int? = nil, licenseModel: String? = nil, masterUserPassword: String? = nil, multiAZ: Bool? = nil, pendingCloudwatchLogsExports: PendingCloudwatchLogsExports? = nil, port: Int? 
= nil, processorFeatures: [ProcessorFeature]? = nil, resumeFullAutomationModeTime: Date? = nil, storageThroughput: Int? = nil, storageType: String? = nil) { + public init(allocatedStorage: Int? = nil, automationMode: AutomationMode? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, iops: Int? = nil, licenseModel: String? = nil, masterUserPassword: String? = nil, multiAZ: Bool? = nil, pendingCloudwatchLogsExports: PendingCloudwatchLogsExports? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, resumeFullAutomationModeTime: Date? = nil, storageThroughput: Int? = nil, storageType: String? = nil) { self.allocatedStorage = allocatedStorage self.automationMode = automationMode self.backupRetentionPeriod = backupRetentionPeriod @@ -9313,6 +9343,7 @@ extension RDS { self.dbInstanceClass = dbInstanceClass self.dbInstanceIdentifier = dbInstanceIdentifier self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.engine = engine self.engineVersion = engineVersion self.iamDatabaseAuthenticationEnabled = iamDatabaseAuthenticationEnabled @@ -9336,6 +9367,7 @@ extension RDS { case dbInstanceClass = "DBInstanceClass" case dbInstanceIdentifier = "DBInstanceIdentifier" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case engine = "Engine" case engineVersion = "EngineVersion" case iamDatabaseAuthenticationEnabled = "IAMDatabaseAuthenticationEnabled" @@ -9515,7 +9547,7 @@ extension RDS { public struct RebootDBInstanceMessage: AWSEncodableShape { /// The DB instance identifier. This parameter is stored as a lowercase string. Constraints: Must match the identifier of an existing DBInstance. public let dbInstanceIdentifier: String - /// A value that indicates whether the reboot is conducted through a Multi-AZ failover. Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ. + /// Specifies whether the reboot is conducted through a Multi-AZ failover. Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ. public let forceFailover: Bool? public init(dbInstanceIdentifier: String, forceFailover: Bool? = nil) { @@ -9732,7 +9764,7 @@ extension RDS { public let fixedPrice: Double? /// The unique identifier for the lease associated with the reserved DB instance. Amazon Web Services Support might request the lease ID for an issue related to a reserved DB instance. public let leaseId: String? - /// Indicates if the reservation applies to Multi-AZ deployments. + /// Indicates whether the reservation applies to Multi-AZ deployments. public let multiAZ: Bool? /// The offering type of this reserved DB instance. public let offeringType: String? @@ -9824,7 +9856,7 @@ extension RDS { public let duration: Int? /// The fixed price charged for this offering. public let fixedPrice: Double? - /// Indicates if the offering applies to Multi-AZ deployments. + /// Indicates whether the offering applies to Multi-AZ deployments. public let multiAZ: Bool? /// The offering type. public let offeringType: String? @@ -9893,7 +9925,7 @@ extension RDS { /// A list of parameter names in the DB cluster parameter group to reset to the default values. 
You can't use this parameter if the ResetAllParameters parameter is enabled. @OptionalCustomCoding> public var parameters: [Parameter]? - /// A value that indicates whether to reset all parameters in the DB cluster parameter group to their default values. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter. + /// Specifies whether to reset all parameters in the DB cluster parameter group to their default values. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter. public let resetAllParameters: Bool? public init(dbClusterParameterGroupName: String, parameters: [Parameter]? = nil, resetAllParameters: Bool? = nil) { @@ -9917,7 +9949,7 @@ extension RDS { /// To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod. A maximum of 20 parameters can be modified in a single request. MySQL Valid Values (for Apply method): immediate | pending-reboot You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots. MariaDB Valid Values (for Apply method): immediate | pending-reboot You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots. Oracle Valid Values (for Apply method): pending-reboot @OptionalCustomCoding> public var parameters: [Parameter]? - /// A value that indicates whether to reset all parameters in the DB parameter group to default values. By default, all parameters in the DB parameter group are reset to default values. + /// Specifies whether to reset all parameters in the DB parameter group to default values. By default, all parameters in the DB parameter group are reset to default values. public let resetAllParameters: Bool? public init(dbParameterGroupName: String, parameters: [Parameter]? = nil, resetAllParameters: Bool? = nil) { @@ -9967,7 +9999,7 @@ extension RDS { public let backupRetentionPeriod: Int? /// A value that indicates that the restored DB cluster should be associated with the specified CharacterSet. public let characterSetName: String? - /// A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. + /// Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. public let copyTagsToSnapshot: Bool? /// The database name for the restored DB cluster. public let databaseName: String? @@ -9977,7 +10009,7 @@ extension RDS { public let dbClusterParameterGroupName: String? /// A DB subnet group to associate with the restored DB cluster. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup public let dbSubnetGroupName: String? - /// A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. + /// Specifies whether to enable deletion protection for the DB cluster. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. public let deletionProtection: Bool? 
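Aside: for the ResetDBParameterGroupMessage reworded above, a minimal hedged sketch, reusing an `rds` client configured as in the earlier sketch; the parameter group name is a placeholder:

```swift
// Reset every parameter in a DB parameter group back to its default value.
// ResetAllParameters can't be combined with an explicit `parameters` list.
let resetRequest = RDS.ResetDBParameterGroupMessage(
    dbParameterGroupName: "my-db-params",  // placeholder name
    resetAllParameters: true
)
let resetResult = try await rds.resetDBParameterGroup(resetRequest)
print(resetResult.dbParameterGroupName ?? "")
```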
/// Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide. public let domain: String? @@ -9986,7 +10018,7 @@ extension RDS { /// The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. Aurora MySQL Possible values are audit, error, general, and slowquery. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. public let enableIAMDatabaseAuthentication: Bool? /// The name of the database engine to be used for this DB cluster. Valid Values: aurora-mysql (for Aurora MySQL) public let engine: String @@ -9994,7 +10026,7 @@ extension RDS { public let engineVersion: String? /// The Amazon Web Services KMS key identifier for an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If the StorageEncrypted parameter is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. public let kmsKeyId: String? - /// A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. public let manageMasterUserPassword: Bool? /// The name of the master user for the restored DB cluster. Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. public let masterUsername: String @@ -10002,7 +10034,7 @@ extension RDS { public let masterUserPassword: String? 
/// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. public let masterUserSecretKmsKeyId: String? - /// The network type of the DB cluster. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. + /// The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. public let networkType: String? /// A value that indicates that the restored DB cluster should be associated with the specified option group. Permanent options can't be removed from an option group. An option group can't be removed from a DB cluster once it is associated with a DB cluster. public let optionGroupName: String? @@ -10019,13 +10051,13 @@ extension RDS { /// The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster. If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket. public let s3Prefix: String? public let serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? - /// The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. Valid values: mysql + /// The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket. Valid Values: mysql public let sourceEngine: String /// The version of the database that the backup files were created from. MySQL versions 5.7 and 8.0 are supported. Example: 5.7.40, 8.0.28 public let sourceEngineVersion: String - /// A value that indicates whether the restored DB cluster is encrypted. + /// Specifies whether the restored DB cluster is encrypted. public let storageEncrypted: Bool? - /// Specifies the storage type to be associated with the DB cluster. Valid values: aurora, aurora-iopt1 Default: aurora Valid for: Aurora DB clusters only + /// Specifies the storage type to be associated with the DB cluster. Valid Values: aurora, aurora-iopt1 Default: aurora Valid for: Aurora DB clusters only public let storageType: String? @OptionalCustomCoding> public var tags: [Tag]? @@ -10134,7 +10166,7 @@ extension RDS { public var availabilityZones: [String]? 
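Aside: the RestoreDBClusterFromS3Message hunks above touch ManageMasterUserPassword and StorageEncrypted, among others. A hedged construction sketch follows; every identifier, bucket name, and ARN is a placeholder, and the async restoreDBClusterFromS3 call is assumed from Soto's usual generated surface:

```swift
// Restore an Aurora MySQL cluster from MySQL backup files in S3, letting RDS manage
// the master user password in Secrets Manager instead of passing one in plain text.
let restoreRequest = RDS.RestoreDBClusterFromS3Message(
    dbClusterIdentifier: "aurora-from-s3",
    engine: "aurora-mysql",
    manageMasterUserPassword: true,
    masterUsername: "admin",
    s3BucketName: "my-backup-bucket",                                     // placeholder bucket
    s3IngestionRoleArn: "arn:aws:iam::123456789012:role/s3-ingestion",    // placeholder role ARN
    sourceEngine: "mysql",
    sourceEngineVersion: "8.0.28",
    storageEncrypted: true
)
let restoreResult = try await rds.restoreDBClusterFromS3(restoreRequest)
// With ManageMasterUserPassword enabled, the generated secret's ARN is reported on the cluster.
print(restoreResult.dbCluster?.masterUserSecret?.secretArn ?? "")
```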
/// The target backtrack window, in seconds. To disable backtracking, set this value to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). Valid for: Aurora DB clusters only public let backtrackWindow: Int64? - /// A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? /// The database name for the restored DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let databaseName: String? @@ -10146,16 +10178,16 @@ extension RDS { public let dbClusterParameterGroupName: String? /// The name of the DB subnet group to use for the new DB cluster. Constraints: If supplied, must match the name of an existing DB subnet group. Example: mydbsubnetgroup Valid for: Aurora DB clusters and Multi-AZ DB clusters public let dbSubnetGroupName: String? - /// A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether to enable deletion protection for the DB cluster. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let deletionProtection: Bool? - /// Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. Valid for: Aurora DB clusters only + /// The Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. Valid for: Aurora DB clusters only public let domain: String? - /// Specify the name of the IAM role to be used when making API calls to the Directory Service. Valid for: Aurora DB clusters only + /// The name of the IAM role to be used when making API calls to the Directory Service. Valid for: Aurora DB clusters only public let domainIAMRoleName: String? /// The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used. RDS for MySQL Possible values are error, general, and slowquery. RDS for PostgreSQL Possible values are postgresql and upgrade. Aurora MySQL Possible values are audit, error, general, and slowquery. Aurora PostgreSQL Possible value is postgresql. For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. 
For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let enableIAMDatabaseAuthentication: Bool? /// The database engine to use for the new DB cluster. Default: The same as source Constraint: Must be compatible with the engine of the source Valid for: Aurora DB clusters and Multi-AZ DB clusters public let engine: String @@ -10167,20 +10199,20 @@ extension RDS { public let iops: Int? /// The Amazon Web Services KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. When you don't specify a value for the KmsKeyId parameter, then the following occurs: If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB snapshot or DB cluster snapshot. If the DB snapshot or DB cluster snapshot in SnapshotIdentifier isn't encrypted, then the restored DB cluster isn't encrypted. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let kmsKeyId: String? - /// The network type of the DB cluster. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only + /// The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let networkType: String? /// The name of the option group to use for the restored DB cluster. DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? /// The port number on which the new DB cluster accepts connections. Constraints: This value must be 1150-65535 Default: The same port as the original DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let port: Int? - /// A value that indicates whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). 
It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let publiclyAccessible: Bool? /// For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. Valid for: Aurora DB clusters only public let scalingConfiguration: ScalingConfiguration? public let serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? /// The identifier for the DB snapshot or DB cluster snapshot to restore from. You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot. Constraints: Must match the identifier of an existing Snapshot. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let snapshotIdentifier: String - /// Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. 
Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters public let storageType: String? /// The tags to be assigned to the restored DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters @OptionalCustomCoding> @@ -10270,7 +10302,7 @@ extension RDS { /// The target backtrack window, in seconds. To disable backtracking, set this value to 0. Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only public let backtrackWindow: Int64? - /// A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? /// The name of the new DB cluster to be created. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens First character must be a letter Can't end with a hyphen or contain two consecutive hyphens Valid for: Aurora DB clusters and Multi-AZ DB clusters public let dbClusterIdentifier: String @@ -10280,16 +10312,16 @@ extension RDS { public let dbClusterParameterGroupName: String? /// The DB subnet group name to use for the new DB cluster. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup Valid for: Aurora DB clusters and Multi-AZ DB clusters public let dbSubnetGroupName: String? - /// A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether to enable deletion protection for the DB cluster. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let deletionProtection: Bool? - /// Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only + /// The Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let domain: String? - /// Specify the name of the IAM role to be used when making API calls to the Directory Service. 
Valid for: Aurora DB clusters only + /// The name of the IAM role to be used when making API calls to the Directory Service. Valid for: Aurora DB clusters only public let domainIAMRoleName: String? /// The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. RDS for MySQL Possible values are error, general, and slowquery. RDS for PostgreSQL Possible values are postgresql and upgrade. Aurora MySQL Possible values are audit, error, general, and slowquery. Aurora PostgreSQL Possible value is postgresql. For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let enableIAMDatabaseAuthentication: Bool? /// The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. Valid for: Aurora DB clusters only public let engineMode: String? @@ -10297,13 +10329,13 @@ extension RDS { public let iops: Int? /// The Amazon Web Services KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different from the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter. If you don't specify a value for the KmsKeyId parameter, then the following occurs: If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster. If the DB cluster isn't encrypted, then the restored DB cluster isn't encrypted. If DBClusterIdentifier refers to a DB cluster that isn't encrypted, then the restore request is rejected. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let kmsKeyId: String? - /// The network type of the DB cluster. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). 
For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only + /// The network type of the DB cluster. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let networkType: String? /// The name of the option group for the new DB cluster. DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? /// The port number on which the new DB cluster accepts connections. Constraints: A value from 1150-65535. Default: The default port for the engine. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let port: Int? - /// A value that indicates whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Multi-AZ DB clusters only + /// Specifies whether the DB cluster is publicly accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. 
If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: Multi-AZ DB clusters only public let publiclyAccessible: Bool? /// The date and time to restore the DB cluster to. Valid Values: Value must be a time in Universal Coordinated Time (UTC) format Constraints: Must be before the latest restorable time for the DB instance Must be specified if UseLatestRestorableTime parameter isn't provided Can't be specified if the UseLatestRestorableTime parameter is enabled Can't be specified if the RestoreType parameter is copy-on-write Example: 2015-03-07T23:45:00Z Valid for: Aurora DB clusters and Multi-AZ DB clusters public let restoreToTime: Date? @@ -10316,11 +10348,11 @@ extension RDS { public let sourceDBClusterIdentifier: String? /// The resource ID of the source DB cluster from which to restore. public let sourceDbClusterResourceId: String? - /// Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies the storage type to be associated with the DB cluster. When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters public let storageType: String? @OptionalCustomCoding> public var tags: [Tag]? - /// A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster isn't restored to the latest restorable backup time. Constraints: Can't be specified if RestoreToTime parameter is provided. Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// Specifies whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster isn't restored to the latest restorable backup time. Constraints: Can't be specified if RestoreToTime parameter is provided. Valid for: Aurora DB clusters and Multi-AZ DB clusters public let useLatestRestorableTime: Bool? /// A list of VPC security groups that the new DB cluster belongs to. Valid for: Aurora DB clusters and Multi-AZ DB clusters @OptionalCustomCoding> @@ -10408,13 +10440,13 @@ extension RDS { /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? - /// A value that indicates whether minor version upgrades are applied automatically to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. + /// Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. public let autoMinorVersionUpgrade: Bool? 
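Aside: for the point-in-time restore message reworded above (UseLatestRestorableTime and related fields), a short hedged sketch, again with placeholder identifiers and the assumed async client call:

```swift
// Restore a cluster to its latest restorable time. RestoreToTime and
// UseLatestRestorableTime are mutually exclusive, so only the latter is set here.
let pitrRequest = RDS.RestoreDBClusterToPointInTimeMessage(
    dbClusterIdentifier: "cluster-restored",      // placeholder name for the new cluster
    sourceDBClusterIdentifier: "cluster-source",  // placeholder source cluster
    useLatestRestorableTime: true
)
let pitrResult = try await rds.restoreDBClusterToPointInTime(pitrRequest)
print(pitrResult.dbCluster?.status ?? "")
```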
/// The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a public let availabilityZone: String? /// Specifies where automated backups and manual snapshots are stored for the restored DB instance. Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. public let backupTarget: String? - /// A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance. In most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance. For more information, see Copying tags to DB instance snapshots in the Amazon RDS User Guide. + /// Specifies whether to copy all tags from the restored DB instance to snapshots of the DB instance. In most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance. For more information, see Copying tags to DB instance snapshots in the Amazon RDS User Guide. public let copyTagsToSnapshot: Bool? /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom. public let customIamInstanceProfile: String? @@ -10432,11 +10464,13 @@ extension RDS { public let dbSnapshotIdentifier: String? /// The DB subnet group name to use for the new instance. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup public let dbSubnetGroupName: String? - /// A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. + /// Specifies whether to enable a dedicated log volume (DLV) for the DB instance. + public let dedicatedLogVolume: Bool? + /// Specifies whether to enable deletion protection for the DB instance. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? /// Specify the Active Directory directory ID to restore the DB instance in. The domain/ must be created prior to this operation. 
Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. public let domain: String? - /// The ARN for the Secrets Manager secret with the credentials for the user joining the domain. Constraints: Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 + /// The ARN for the Secrets Manager secret with the credentials for the user joining the domain. Constraints: Can't be longer than 64 characters. Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 public let domainAuthSecretArn: String? /// The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers. Constraints: Two IP addresses must be provided. If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list. Example: 123.124.125.126,234.235.236.237 @OptionalCustomCoding @@ -10450,19 +10484,19 @@ extension RDS { /// The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? - /// A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. This setting doesn't apply to RDS Custom. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. + /// Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. This setting doesn't apply to RDS Custom. For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide. public let enableCustomerOwnedIp: Bool? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. 
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. public let enableIAMDatabaseAuthentication: Bool? /// The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Default: The same as source Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid Values: mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web public let engine: String? /// Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts. The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be an integer greater than 1000. public let iops: Int? - /// License model information for the restored DB instance. This setting doesn't apply to RDS Custom. Default: Same as source. Valid values: license-included | bring-your-own-license | general-public-license + /// License model information for the restored DB instance. This setting doesn't apply to RDS Custom. Default: Same as source. Valid Values: license-included | bring-your-own-license | general-public-license public let licenseModel: String? - /// A value that indicates whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. + /// Specifies whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. public let multiAZ: Bool? - /// The network type of the DB instance. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. + /// The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. public let networkType: String? /// The name of the option group to be used for the restored DB instance. Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance after it is associated with a DB instance. This setting doesn't apply to RDS Custom. public let optionGroupName: String? @@ -10471,11 +10505,11 @@ extension RDS { /// The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. This setting doesn't apply to RDS Custom. 
@OptionalCustomCoding> public var processorFeatures: [ProcessorFeature]? - /// A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. + /// Specifies whether the DB instance is publicly accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. public let publiclyAccessible: Bool? /// Specifies the storage throughput value for the DB instance. This setting doesn't apply to RDS Custom or Amazon Aurora. public let storageThroughput: Int? - /// Specifies the storage type to be associated with the DB instance. Valid values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise gp2 + /// Specifies the storage type to be associated with the DB instance. Valid Values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise gp2 public let storageType: String? @OptionalCustomCoding> public var tags: [Tag]? @@ -10483,13 +10517,13 @@ extension RDS { public let tdeCredentialArn: String? /// The password for the given ARN from the key store in order to access the device. This setting doesn't apply to RDS Custom. public let tdeCredentialPassword: String? - /// A value that indicates whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom. + /// Specifies whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom. public let useDefaultProcessorFeatures: Bool? /// A list of EC2 VPC security groups to associate with this DB instance. Default: The default EC2 VPC security group for the DB subnet group's VPC. @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterSnapshotIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSnapshotIdentifier: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? 
= nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterSnapshotIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSnapshotIdentifier: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -10503,6 +10537,7 @@ extension RDS { self.dbParameterGroupName = dbParameterGroupName self.dbSnapshotIdentifier = dbSnapshotIdentifier self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.domain = domain self.domainAuthSecretArn = domainAuthSecretArn @@ -10545,6 +10580,7 @@ extension RDS { case dbParameterGroupName = "DBParameterGroupName" case dbSnapshotIdentifier = "DBSnapshotIdentifier" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case domain = "Domain" case domainAuthSecretArn = "DomainAuthSecretArn" @@ -10594,13 +10630,13 @@ extension RDS { /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? - /// A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. 
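The new `DedicatedLogVolume` member threads through the message, its memberwise initializer, and the `CodingKeys` in the same way as the existing fields, so opting a restored instance into a dedicated log volume is a one-argument change at the call site. A rough sketch, assuming a configured `AWSClient`; the identifiers are placeholders:

```swift
import SotoRDS

func restoreFromSnapshotWithDedicatedLogVolume(client: AWSClient) async throws {
    let rds = RDS(client: client, region: .useast1)

    // `dedicatedLogVolume` is the field added in this diff; everything
    // else below is an illustrative placeholder.
    let request = RDS.RestoreDBInstanceFromDBSnapshotMessage(
        dbInstanceIdentifier: "my-restored-instance",
        dbSnapshotIdentifier: "my-snapshot",
        dedicatedLogVolume: true,
        deletionProtection: true
    )
    let response = try await rds.restoreDBInstanceFromDBSnapshot(request)
    print(response.dbInstance?.dbInstanceStatus ?? "unknown")
}
```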
+ /// Specifies whether to automatically apply minor engine upgrades to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. public let autoMinorVersionUpgrade: Bool? /// The Availability Zone that the DB instance is created in. For information about Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones in the Amazon RDS User Guide. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint. public let availabilityZone: String? /// The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. For more information, see CreateDBInstance. public let backupRetentionPeriod: Int? - /// A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. + /// Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. public let copyTagsToSnapshot: Bool? /// The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. public let dbInstanceClass: String @@ -10615,14 +10651,16 @@ extension RDS { public var dbSecurityGroups: [String]? /// A DB subnet group to associate with this DB instance. Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: mydbsubnetgroup public let dbSubnetGroupName: String? - /// A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. + /// Specifies whether to enable a dedicated log volume (DLV) for the DB instance. + public let dedicatedLogVolume: Bool? + /// Specifies whether to enable deletion protection for the DB instance. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? /// The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. @OptionalCustomCoding public var enableCloudwatchLogsExports: [String]? - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. + /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. 
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. public let enableIAMDatabaseAuthentication: Bool? - /// A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. + /// Specifies whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. public let enablePerformanceInsights: Bool? /// The name of the database engine to be used for this instance. Valid Values: mysql public let engine: String @@ -10634,7 +10672,7 @@ extension RDS { public let kmsKeyId: String? /// The license model for this DB instance. Use general-public-license. public let licenseModel: String? - /// A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. public let manageMasterUserPassword: Bool? /// The name for the master user. Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. public let masterUsername: String? @@ -10648,9 +10686,9 @@ extension RDS { public let monitoringInterval: Int? /// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. public let monitoringRoleArn: String? - /// A value that indicates whether the DB instance is a Multi-AZ deployment. If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter. + /// Specifies whether the DB instance is a Multi-AZ deployment. If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter. public let multiAZ: Bool? - /// The network type of the DB instance. Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. + /// The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. public let networkType: String? /// The name of the option group to associate with this DB instance. If this argument is omitted, the default option group for the specified engine is used. 
public let optionGroupName: String? @@ -10667,7 +10705,7 @@ extension RDS { /// The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. @OptionalCustomCoding> public var processorFeatures: [ProcessorFeature]? - /// A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. + /// Specifies whether the DB instance is publicly accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance. public let publiclyAccessible: Bool? /// The name of your Amazon S3 bucket that contains your database backup file. public let s3BucketName: String @@ -10679,22 +10717,22 @@ extension RDS { public let sourceEngine: String /// The version of the database that the backup files were created from. MySQL versions 5.6 and 5.7 are supported. Example: 5.6.40 public let sourceEngineVersion: String - /// A value that indicates whether the new DB instance is encrypted or not. + /// Specifies whether the new DB instance is encrypted or not. public let storageEncrypted: Bool? /// Specifies the storage throughput value for the DB instance. This setting doesn't apply to RDS Custom or Amazon Aurora. public let storageThroughput: Int? - /// Specifies the storage type to be associated with the DB instance. Valid values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise gp2 + /// Specifies the storage type to be associated with the DB instance. Valid Values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise gp2 public let storageType: String? /// A list of tags to associate with this DB instance. For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. @OptionalCustomCoding> public var tags: [Tag]? - /// A value that indicates whether the DB instance class of the DB instance uses its default processor features. + /// Specifies whether the DB instance class of the DB instance uses its default processor features. public let useDefaultProcessorFeatures: Bool? /// A list of VPC security groups to associate with this DB instance. @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? 
= nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, s3BucketName: String, s3IngestionRoleArn: String, s3Prefix: String? = nil, sourceEngine: String, sourceEngineVersion: String, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String, dbInstanceIdentifier: String, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, s3BucketName: String, s3IngestionRoleArn: String, s3Prefix: String? = nil, sourceEngine: String, sourceEngineVersion: String, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -10706,6 +10744,7 @@ extension RDS { self.dbParameterGroupName = dbParameterGroupName self.dbSecurityGroups = dbSecurityGroups self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.enableCloudwatchLogsExports = enableCloudwatchLogsExports self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication @@ -10757,6 +10796,7 @@ extension RDS { case dbParameterGroupName = "DBParameterGroupName" case dbSecurityGroups = "DBSecurityGroups" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case enableCloudwatchLogsExports = "EnableCloudwatchLogsExports" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" @@ -10834,6 +10874,8 @@ extension RDS { public let dbParameterGroupName: String? /// The DB subnet group name to use for the new instance. Constraints: If supplied, must match the name of an existing DB subnet group. Example: mydbsubnetgroup public let dbSubnetGroupName: String? + /// Specifies whether to enable a dedicated log volume (DLV) for the DB instance. + public let dedicatedLogVolume: Bool? /// Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. public let deletionProtection: Bool? /// The Active Directory directory ID to restore the DB instance in. Create the domain before running this command. Currently, you can create only the MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. This setting doesn't apply to RDS Custom. For more information, see Kerberos Authentication in the Amazon RDS User Guide. @@ -10897,7 +10939,7 @@ extension RDS { public let tdeCredentialArn: String? /// The password for the given ARN from the key store in order to access the device. This setting doesn't apply to RDS Custom. public let tdeCredentialPassword: String? - /// A value that indicates whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom. + /// Specifies whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom. public let useDefaultProcessorFeatures: Bool? /// Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. Constraints: Can't be specified if the RestoreTime parameter is provided. public let useLatestRestorableTime: Bool? @@ -10905,7 +10947,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? 
= nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, maxAllocatedStorage: Int? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, restoreTime: Date? = nil, sourceDBInstanceAutomatedBackupsArn: String? = nil, sourceDBInstanceIdentifier: String? = nil, sourceDbiResourceId: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, targetDBInstanceIdentifier: String, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, maxAllocatedStorage: Int? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, restoreTime: Date? = nil, sourceDBInstanceAutomatedBackupsArn: String? = nil, sourceDBInstanceIdentifier: String? = nil, sourceDbiResourceId: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, targetDBInstanceIdentifier: String, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -10916,6 +10958,7 @@ extension RDS { self.dbName = dbName self.dbParameterGroupName = dbParameterGroupName self.dbSubnetGroupName = dbSubnetGroupName + self.dedicatedLogVolume = dedicatedLogVolume self.deletionProtection = deletionProtection self.domain = domain self.domainAuthSecretArn = domainAuthSecretArn @@ -10962,6 +11005,7 @@ extension RDS { case dbName = "DBName" case dbParameterGroupName = "DBParameterGroupName" case dbSubnetGroupName = "DBSubnetGroupName" + case dedicatedLogVolume = "DedicatedLogVolume" case deletionProtection = "DeletionProtection" case domain = "Domain" case domainAuthSecretArn = "DomainAuthSecretArn" @@ -11069,7 +11113,7 @@ extension RDS { } public struct ScalingConfiguration: AWSEncodableShape { - /// A value that indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. 
In this case, the DB cluster is restored when there is a request to connect to it. + /// Indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. public let autoPause: Bool? /// The maximum capacity for an Aurora DB cluster in serverless DB engine mode. For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384. The maximum capacity must be greater than or equal to the minimum capacity. public let maxCapacity: Int? @@ -11102,7 +11146,7 @@ extension RDS { } public struct ScalingConfigurationInfo: AWSDecodableShape { - /// A value that indicates whether automatic pause is allowed for the Aurora DB cluster in serverless DB engine mode. When the value is set to false for an Aurora Serverless v1 DB cluster, the DB cluster automatically resumes. + /// Indicates whether automatic pause is allowed for the Aurora DB cluster in serverless DB engine mode. When the value is set to false for an Aurora Serverless v1 DB cluster, the DB cluster automatically resumes. public let autoPause: Bool? /// The maximum capacity for an Aurora DB cluster in serverless DB engine mode. public let maxCapacity: Int? @@ -11175,7 +11219,7 @@ extension RDS { public let regionName: String? /// The status of the source Amazon Web Services Region. public let status: String? - /// Whether the source Amazon Web Services Region supports replicating automated backups to the current Amazon Web Services Region. + /// Indicates whether the source Amazon Web Services Region supports replicating automated backups to the current Amazon Web Services Region. public let supportsDBInstanceAutomatedBackupsReplication: Bool? public init(endpoint: String? = nil, regionName: String? = nil, status: String? = nil, supportsDBInstanceAutomatedBackupsReplication: Bool? = nil) { @@ -11363,7 +11407,7 @@ extension RDS { } public struct StartExportTaskMessage: AWSEncodableShape { - /// The data to be exported from the snapshot or cluster. If this parameter is not provided, all of the data is exported. Valid values are the following: database - Export all the data from a specified database. database.table table-name - Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. database.schema schema-name - Export a database schema of the snapshot or cluster. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. database.schema.table table-name - Export a table of the database schema. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. + /// The data to be exported from the snapshot or cluster. If this parameter isn't provided, all of the data is exported. Valid Values: database - Export all the data from a specified database. database.table table-name - Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. database.schema schema-name - Export a database schema of the snapshot or cluster. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. database.schema.table table-name - Export a table of the database schema. 
This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. @OptionalCustomCoding public var exportOnly: [String]? /// A unique identifier for the export task. This ID isn't an identifier for the Amazon S3 bucket where the data is to be exported. @@ -11729,7 +11773,7 @@ extension RDS { } public struct UpgradeTarget: AWSDecodableShape { - /// A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true. + /// Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true. public let autoUpgrade: Bool? /// The version of the database engine that a DB instance can be upgraded to. public let description: String? @@ -11737,18 +11781,18 @@ extension RDS { public let engine: String? /// The version number of the upgrade target database engine. public let engineVersion: String? - /// A value that indicates whether upgrading to the target version requires upgrading the major version of the database engine. + /// Indicates whether upgrading to the target version requires upgrading the major version of the database engine. public let isMajorVersionUpgrade: Bool? /// A list of the supported DB engine modes for the target engine version. @OptionalCustomCoding public var supportedEngineModes: [String]? - /// A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version. + /// Indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version. public let supportsBabelfish: Bool? - /// A value that indicates whether you can use Aurora global databases with the target engine version. + /// Indicates whether you can use Aurora global databases with the target engine version. public let supportsGlobalDatabases: Bool? - /// A value that indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only + /// Indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only public let supportsLocalWriteForwarding: Bool? - /// A value that indicates whether you can use Aurora parallel query with the target engine version. + /// Indicates whether you can use Aurora parallel query with the target engine version. public let supportsParallelQuery: Bool? public init(autoUpgrade: Bool? = nil, description: String? = nil, engine: String? = nil, engineVersion: String? = nil, isMajorVersionUpgrade: Bool? = nil, supportedEngineModes: [String]? = nil, supportsBabelfish: Bool? = nil, supportsGlobalDatabases: Bool? = nil, supportsLocalWriteForwarding: Bool? = nil, supportsParallelQuery: Bool? = nil) { @@ -11785,7 +11829,7 @@ extension RDS { public let clientPasswordAuthType: ClientPasswordAuthType? /// A user-specified description about the authentication used by a proxy to log in as a specific database user. public let description: String? - /// Whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server. 
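The `ExportOnly` formats described above are easiest to see in an actual `StartExportTask` call. A hedged sketch: the bucket, role, key, and snapshot ARNs are placeholders, and the required fields other than `exportOnly` come from the full `StartExportTaskMessage` shape, not this hunk:

```swift
import SotoRDS

func exportSingleTable(client: AWSClient) async throws {
    let rds = RDS(client: client, region: .useast1)

    // Export one table from a MySQL snapshot; omit `exportOnly` to export
    // everything. All identifiers and ARNs below are placeholders.
    let request = RDS.StartExportTaskMessage(
        exportOnly: ["mydb.mytable"], // database.table form (MySQL, MariaDB, Aurora MySQL)
        exportTaskIdentifier: "mydb-export-1",
        iamRoleArn: "arn:aws:iam::123456789012:role/rds-s3-export",
        kmsKeyId: "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE",
        s3BucketName: "my-export-bucket",
        sourceArn: "arn:aws:rds:us-east-1:123456789012:snapshot:mydb-snapshot"
    )
    let task = try await rds.startExportTask(request)
    print(task.status ?? "unknown")
}
```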
+ /// A value that indicates whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server. public let iamAuth: IAMAuthMode? /// The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager. public let secretArn: String? @@ -11851,17 +11895,21 @@ extension RDS { /// Valid storage options for your DB instance. @OptionalCustomCoding> public var storage: [ValidStorageOptions]? + /// Indicates whether a DB instance supports using a dedicated log volume (DLV). + public let supportsDedicatedLogVolume: Bool? /// Valid processor features for your DB instance. @OptionalCustomCoding> public var validProcessorFeatures: [AvailableProcessorFeature]? - public init(storage: [ValidStorageOptions]? = nil, validProcessorFeatures: [AvailableProcessorFeature]? = nil) { + public init(storage: [ValidStorageOptions]? = nil, supportsDedicatedLogVolume: Bool? = nil, validProcessorFeatures: [AvailableProcessorFeature]? = nil) { self.storage = storage + self.supportsDedicatedLogVolume = supportsDedicatedLogVolume self.validProcessorFeatures = validProcessorFeatures } private enum CodingKeys: String, CodingKey { case storage = "Storage" + case supportsDedicatedLogVolume = "SupportsDedicatedLogVolume" case validProcessorFeatures = "ValidProcessorFeatures" } } @@ -11890,7 +11938,7 @@ extension RDS { public var storageThroughputToIopsRatio: [DoubleRange]? /// The valid storage types for your DB instance. For example: gp2, gp3, io1. public let storageType: String? - /// Whether or not Amazon RDS can automatically scale storage for DB instances that use the new instance class. + /// Indicates whether or not Amazon RDS can automatically scale storage for DB instances that use the new instance class. public let supportsStorageAutoscaling: Bool? public init(iopsToStorageRatio: [DoubleRange]? = nil, provisionedIops: [Range]? = nil, provisionedStorageThroughput: [Range]? = nil, storageSize: [Range]? = nil, storageThroughputToIopsRatio: [DoubleRange]? = nil, storageType: String? = nil, supportsStorageAutoscaling: Bool? = nil) { diff --git a/Sources/Soto/Services/Rekognition/Rekognition_api+async.swift b/Sources/Soto/Services/Rekognition/Rekognition_api+async.swift index a170028349..ffcbed6cf3 100644 --- a/Sources/Soto/Services/Rekognition/Rekognition_api+async.swift +++ b/Sources/Soto/Services/Rekognition/Rekognition_api+async.swift @@ -31,7 +31,7 @@ extension Rekognition { return try await self.client.execute(operation: "CompareFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. 
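On the describe side, the new `SupportsDedicatedLogVolume` flag can be read back before attempting a modification. A rough sketch, assuming the surrounding `DescribeValidDBInstanceModifications` request and result shapes follow the same conventions as the rest of the generated RDS client:

```swift
import SotoRDS

func supportsDedicatedLogVolume(client: AWSClient, instanceIdentifier: String) async throws -> Bool {
    let rds = RDS(client: client, region: .useast1)
    let request = RDS.DescribeValidDBInstanceModificationsMessage(
        dbInstanceIdentifier: instanceIdentifier
    )
    let result = try await rds.describeValidDBInstanceModifications(request)
    // `supportsDedicatedLogVolume` is the field added in this diff.
    return result.validDBInstanceModificationsMessage?.supportsDedicatedLogVolume ?? false
}
```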
For more information creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. Copying project versions is supported only for Custom Labels models. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action. public func copyProjectVersion(_ input: CopyProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CopyProjectVersionResponse { return try await self.client.execute(operation: "CopyProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -41,7 +41,7 @@ extension Rekognition { return try await self.client.execute(operation: "CreateCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries + /// This operation applies only to Amazon Rekognition Custom Labels. 
Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries /// and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action. public func createDataset(_ input: CreateDatasetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateDatasetResponse { return try await self.client.execute(operation: "CreateDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -52,12 +52,12 @@ extension Rekognition { return try await self.client.execute(operation: "CreateFaceLivenessSession", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) that you use to create and manage Amazon Rekognition Custom Labels models. This operation requires permissions to perform the rekognition:CreateProject action. + /// Creates a new Amazon Rekognition project. A project is a group of resources (datasets, model versions) that you use to create and manage a Amazon Rekognition Custom Labels Model or custom adapter. You can specify a feature to create the project with, if no feature is specified then Custom Labels is used by default. For adapters, you can also choose whether or not to have the project auto update by using the AutoUpdate argument. This operation requires permissions to perform the rekognition:CreateProject action. public func createProject(_ input: CreateProjectRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateProjectResponse { return try await self.client.execute(operation: "CreateProject", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. 
If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. Training takes a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start the model by calling StartProjectVersion. This operation requires permissions to perform the rekognition:CreateProjectVersion action. + /// Creates a new version of Amazon Rekognition project (like a Custom Labels model or a custom adapter) and begins training. Models and adapters are managed as part of a Rekognition project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the project version. The FeatureConfig operation argument allows you to configure specific model or adapter settings. You can provide a description to the project version by using the VersionDescription argment. Training can take a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. This operation requires permissions to perform the rekognition:CreateProjectVersion action. The following applies only to projects with Amazon Rekognition Custom Labels as the chosen feature: You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. public func createProjectVersion(_ input: CreateProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateProjectVersionResponse { return try await self.client.execute(operation: "CreateProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -77,7 +77,7 @@ extension Rekognition { return try await self.client.execute(operation: "DeleteCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. 
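Since the `CreateProject` documentation now covers both Custom Labels models and adapters, a call through the generated async API is a useful reference point. A minimal sketch, assuming a configured `AWSClient`; only `projectName` is taken from the request shape as shown here, the project name itself is a placeholder, and the feature/auto-update arguments mentioned in the new documentation are omitted rather than guessed at:

```swift
import SotoRekognition

func createCustomLabelsProject(client: AWSClient) async throws -> String? {
    let rekognition = Rekognition(client: client, region: .useast1)

    // With no feature specified, the service defaults the project to
    // Custom Labels, as described in the updated CreateProject docs above.
    let request = Rekognition.CreateProjectRequest(projectName: "my-project")
    let response = try await rekognition.createProject(request)
    return response.projectArn
}
```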
If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception. + /// This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception. /// You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS). This operation requires permissions to perform the rekognition:DeleteDataset action. public func deleteDataset(_ input: DeleteDatasetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteDatasetResponse { return try await self.client.execute(operation: "DeleteDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -88,17 +88,17 @@ extension Rekognition { return try await self.client.execute(operation: "DeleteFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated with the project. To delete a model, see DeleteProjectVersion. DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project. This operation requires permissions to perform the rekognition:DeleteProject action. + /// Deletes an Amazon Rekognition project. To delete a project you must first delete all models or adapters associated with the project. To delete a model or adapter, see DeleteProjectVersion. DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project. This operation requires permissions to perform the rekognition:DeleteProject action. public func deleteProject(_ input: DeleteProjectRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteProjectResponse { return try await self.client.execute(operation: "DeleteProject", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This operation requires permissions to perform the rekognition:DeleteProjectPolicy action. + /// This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This operation requires permissions to perform the rekognition:DeleteProjectPolicy action. public func deleteProjectPolicy(_ input: DeleteProjectPolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) async throws -> DeleteProjectPolicyResponse { return try await self.client.execute(operation: "DeleteProjectPolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an Amazon Rekognition Custom Labels model. You can't delete a model if it is running or if it is training. To check the status of a model, use the Status field returned from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model is training, wait until it finishes. This operation requires permissions to perform the rekognition:DeleteProjectVersion action. + /// Deletes a Rekognition project model or project version, like an Amazon Rekognition Custom Labels model or a custom adapter. You can't delete a project version if it is running or if it is training. To check the status of a project version, use the Status field returned from DescribeProjectVersions. To stop a project version, call StopProjectVersion. If the project version is training, wait until it finishes. This operation requires permissions to perform the rekognition:DeleteProjectVersion action. public func deleteProjectVersion(_ input: DeleteProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteProjectVersionResponse { return try await self.client.execute(operation: "DeleteProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -118,6 +118,7 @@ extension Rekognition { return try await self.client.execute(operation: "DescribeCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Describes an Amazon Rekognition Custom Labels dataset. You can get information such as the current status of a dataset and /// statistics about the images and labels in a dataset. /// This operation requires permissions to perform the rekognition:DescribeDataset action. @@ -125,12 +126,12 @@ extension Rekognition { return try await self.client.execute(operation: "DescribeDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. You can specify up to 10 model versions in ProjectVersionArns. If you don't specify a value, descriptions for all model versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. + /// Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions for all model/adapter versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. public func describeProjectVersions(_ input: DescribeProjectVersionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeProjectVersionsResponse { return try await self.client.execute(operation: "DescribeProjectVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets information about your Amazon Rekognition Custom Labels projects. This operation requires permissions to perform the rekognition:DescribeProjects action.
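A sketch of the stop-then-delete order the documentation above requires, assuming a `rekognition` client and a `projectArn`.

// Sketch only: a running version must reach STOPPED before DeleteProjectVersion succeeds,
// and DeleteProject succeeds only once every version has been deleted.
let versions = try await rekognition.describeProjectVersions(
    Rekognition.DescribeProjectVersionsRequest(projectArn: projectArn)
).projectVersionDescriptions ?? []

for version in versions {
    guard let versionArn = version.projectVersionArn, let status = version.status else { continue }
    switch status {
    case .running:
        _ = try await rekognition.stopProjectVersion(
            Rekognition.StopProjectVersionRequest(projectVersionArn: versionArn))
    case .stopped, .trainingFailed:
        _ = try await rekognition.deleteProjectVersion(
            Rekognition.DeleteProjectVersionRequest(projectVersionArn: versionArn))
    default:
        break   // still training or stopping: wait and re-check before deleting
    }
}
// After all versions are deleted:
// _ = try await rekognition.deleteProject(Rekognition.DeleteProjectRequest(projectArn: projectArn))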
+ /// Gets information about your Rekognition projects. This operation requires permissions to perform the rekognition:DescribeProjects action. public func describeProjects(_ input: DescribeProjectsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeProjectsResponse { return try await self.client.execute(operation: "DescribeProjects", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -140,7 +141,7 @@ extension Rekognition { return try await self.client.execute(operation: "DescribeStreamProcessor", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model. You specify which version of a model version to use by using the ProjectVersionArn input parameter. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. + /// This operation applies only to Amazon Rekognition Custom Labels. Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model. You specify which version of the model to use by using the ProjectVersionArn input parameter. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. /// The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide. If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.
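A sketch of the analysis path for a trained Custom Labels model, assuming a `rekognition` client and a `modelArn` (the project version ARN); bucket and object names are placeholders.

// Sketch only: the model must be RUNNING before DetectCustomLabels is called (polling for
// RUNNING is omitted here), and you are charged while it runs, so stop it when done.
// MinConfidence is a percentage in the 0-100 range, as described above.
_ = try await rekognition.startProjectVersion(
    Rekognition.StartProjectVersionRequest(minInferenceUnits: 1, projectVersionArn: modelArn))

let detected = try await rekognition.detectCustomLabels(
    Rekognition.DetectCustomLabelsRequest(
        image: Rekognition.Image(s3Object: Rekognition.S3Object(bucket: "my-bucket", name: "images/sample.jpg")),
        minConfidence: 80,
        projectVersionArn: modelArn
    )
)
for label in detected.customLabels ?? [] {
    print(label.name ?? "?", label.confidence ?? 0)
}

_ = try await rekognition.stopProjectVersion(
    Rekognition.StopProjectVersionRequest(projectVersionArn: modelArn))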
This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectCustomLabels action. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide. public func detectCustomLabels(_ input: DetectCustomLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DetectCustomLabelsResponse { return try await self.client.execute(operation: "DetectCustomLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -156,7 +157,7 @@ extension Rekognition { return try await self.client.execute(operation: "DetectLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content. To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate. For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. + /// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content. To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate. For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. You can specify an adapter to use when retrieving label predictions by providing a ProjectVersionArn to the ProjectVersion argument. public func detectModerationLabels(_ input: DetectModerationLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DetectModerationLabelsResponse { return try await self.client.execute(operation: "DetectModerationLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -176,7 +177,7 @@ extension Rekognition { return try await self.client.execute(operation: "DisassociateFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. 
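A sketch of DetectModerationLabels with an adapter, assuming a `rekognition` client and an `adapterVersionArn`; the `projectVersion` parameter name is taken from the ProjectVersion argument described above and is an assumption about the generated shape.

// Sketch only: pass the adapter's project version ARN to get adapter-tuned moderation labels;
// omit `projectVersion` to use the base moderation model.
let moderation = try await rekognition.detectModerationLabels(
    Rekognition.DetectModerationLabelsRequest(
        image: Rekognition.Image(s3Object: Rekognition.S3Object(bucket: "my-bucket", name: "uploads/photo.png")),
        minConfidence: 60,
        projectVersion: adapterVersionArn   // assumed parameter name for the new ProjectVersion argument
    )
)
for label in moderation.moderationLabels ?? [] {
    print(label.name ?? "?", label.parentName ?? "", label.confidence ?? 0)
}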
The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action. + /// This operation applies only to Amazon Rekognition Custom Labels. Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action. public func distributeDatasetEntries(_ input: DistributeDatasetEntriesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DistributeDatasetEntriesResponse { return try await self.client.execute(operation: "DistributeDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -241,6 +242,7 @@ extension Rekognition { return try await self.client.execute(operation: "ListCollections", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Lists the entries (images) within a dataset. An entry is a /// JSON Line that contains the information for a single image, including /// the image location, assigned labels, and object location bounding boxes. For @@ -250,7 +252,7 @@ extension Rekognition { return try await self.client.execute(operation: "ListDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. + /// This operation applies only to Amazon Rekognition Custom Labels. Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide. public func listDatasetLabels(_ input: ListDatasetLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListDatasetLabelsResponse { return try await self.client.execute(operation: "ListDatasetLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -261,7 +263,7 @@ extension Rekognition { return try await self.client.execute(operation: "ListFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. + /// This operation applies only to Amazon Rekognition Custom Labels. Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. public func listProjectPolicies(_ input: ListProjectPoliciesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListProjectPoliciesResponse { return try await self.client.execute(operation: "ListProjectPolicies", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -281,7 +283,7 @@ extension Rekognition { return try await self.client.execute(operation: "ListUsers", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Attaches a project policy to a Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action. + /// This operation applies only to Amazon Rekognition Custom Labels. Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation. Only applies to Custom Labels projects. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy.
To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action. public func putProjectPolicy(_ input: PutProjectPolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> PutProjectPolicyResponse { return try await self.client.execute(operation: "PutProjectPolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -341,7 +343,7 @@ extension Rekognition { return try await self.client.execute(operation: "StartPersonTracking", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions. Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels. You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion. For more information, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide. This operation requires permissions to perform the rekognition:StartProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions. Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels. You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion. This operation requires permissions to perform the rekognition:StartProjectVersion action. public func startProjectVersion(_ input: StartProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartProjectVersionResponse { return try await self.client.execute(operation: "StartProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -361,7 +363,7 @@ extension Rekognition { return try await self.client.execute(operation: "StartTextDetection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. This operation requires permissions to perform the rekognition:StopProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. Only applies to Custom Labels projects. This operation requires permissions to perform the rekognition:StopProjectVersion action. public func stopProjectVersion(_ input: StopProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> StopProjectVersionResponse { return try await self.client.execute(operation: "StopProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -381,7 +383,7 @@ extension Rekognition { return try await self.client.execute(operation: "UntagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If source-ref field doesn't reference an existing image, the image is added as a new image to the dataset. You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number JSON Lines that you can change, but the size of Changes must be less + /// This operation applies only to Amazon Rekognition Custom Labels. Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If the source-ref field doesn't reference an existing image, the image is added as a new image to the dataset. You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number of JSON Lines that you can change, but the size of Changes must be less /// than 5MB. UpdateDatasetEntries returns immediatly, but the dataset update might take a while to complete. Use DescribeDataset to check the current status. The dataset updated successfully if the value of Status is UPDATE_COMPLETE. To check if any non-terminal errors occured, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK. This operation requires permissions to perform the rekognition:UpdateDatasetEntries action. public func updateDatasetEntries(_ input: UpdateDatasetEntriesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateDatasetEntriesResponse { return try await self.client.execute(operation: "UpdateDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -397,7 +399,7 @@ extension Rekognition { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension Rekognition { - /// Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. You can specify up to 10 model versions in ProjectVersionArns. If you don't specify a value, descriptions for all model versions in the project are returned.
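A sketch of UpdateDatasetEntries, assuming a `rekognition` client and a `trainingDatasetArn`; the manifest JSON Line is illustrative only, and it is an assumption that the GroundTruth blob is supplied as AWSBase64Data.

import Foundation   // for Data

// Sketch only: add or update a single entry, then poll DescribeDataset for UPDATE_COMPLETE.
// The JSON Line below is a placeholder, not a complete Custom Labels manifest entry.
let jsonLine = #"{"source-ref":"s3://my-bucket/images/cat-001.jpg","my-label":1}"#
_ = try await rekognition.updateDatasetEntries(
    Rekognition.UpdateDatasetEntriesRequest(
        changes: Rekognition.DatasetChanges(groundTruth: .data(Data(jsonLine.utf8))),   // assumed AWSBase64Data blob
        datasetArn: trainingDatasetArn
    )
)
// UpdateDatasetEntries returns immediately; check DescribeDataset until Status is UPDATE_COMPLETE.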
This operation requires permissions to perform the rekognition:DescribeProjectVersions action. + /// Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions for all model/adapter versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -419,7 +421,7 @@ extension Rekognition { ) } - /// Gets information about your Amazon Rekognition Custom Labels projects. This operation requires permissions to perform the rekognition:DescribeProjects action. + /// Gets information about your Rekognition projects. This operation requires permissions to perform the rekognition:DescribeProjects action. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -639,6 +641,7 @@ extension Rekognition { ) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Lists the entries (images) within a dataset. An entry is a /// JSON Line that contains the information for a single image, including /// the image location, assigned labels, and object location bounding boxes. For @@ -665,7 +668,7 @@ extension Rekognition { ) } - /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. + /// This operation applies only to Amazon Rekognition Custom Labels. Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide. /// Return PaginatorSequence for operation. /// @@ -710,7 +713,7 @@ extension Rekognition { ) } - /// Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. + /// This operation applies only to Amazon Rekognition Custom Labels. Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/Rekognition/Rekognition_api.swift b/Sources/Soto/Services/Rekognition/Rekognition_api.swift index 6339b482a4..73f5edb9a0 100644 --- a/Sources/Soto/Services/Rekognition/Rekognition_api.swift +++ b/Sources/Soto/Services/Rekognition/Rekognition_api.swift @@ -84,7 +84,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "CompareFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. 
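A sketch of the async paginator surface referenced above, assuming a `rekognition` client and a `projectArn`.

// Sketch only: PaginatorSequence handles NextToken internally, so every model/adapter
// version in the project is visited without manual pagination.
let request = Rekognition.DescribeProjectVersionsRequest(maxResults: 10, projectArn: projectArn)
for try await page in rekognition.describeProjectVersionsPaginator(request) {
    for version in page.projectVersionDescriptions ?? [] {
        print(version.projectVersionArn ?? "?", version.status?.rawValue ?? "unknown")
    }
}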
To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. Copying project versions is supported only for Custom Labels models. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action. public func copyProjectVersion(_ input: CopyProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CopyProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -94,7 +94,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "CreateCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. 
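A sketch of the cross-account copy flow described above, assuming a configured client in each account, a `policyJSON` document, and placeholder project/version ARNs.

// Sketch only: in the trusting account, attach a project policy to the source project...
_ = try await rekognition.putProjectPolicy(
    Rekognition.PutProjectPolicyRequest(
        policyDocument: policyJSON,            // resource-based policy allowing the trusted account to copy
        policyName: "allow-copy-to-partner",
        projectArn: sourceProjectArn
    )
)
// ...then, in the trusted account, copy the model version into a destination project.
let copy = try await rekognition.copyProjectVersion(
    Rekognition.CopyProjectVersionRequest(
        destinationProjectArn: destinationProjectArn,
        outputConfig: Rekognition.OutputConfig(s3Bucket: "copy-output-bucket", s3KeyPrefix: "copies/"),
        sourceProjectArn: sourceProjectArn,
        sourceProjectVersionArn: sourceVersionArn,
        versionName: "copied-v1"
    )
)
print(copy.projectVersionArn ?? "copy started")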
Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries + /// This operation applies only to Amazon Rekognition Custom Labels. Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries /// and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action. public func createDataset(_ input: CreateDatasetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -105,12 +105,12 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "CreateFaceLivenessSession", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) that you use to create and manage Amazon Rekognition Custom Labels models. This operation requires permissions to perform the rekognition:CreateProject action. + /// Creates a new Amazon Rekognition project. A project is a group of resources (datasets, model versions) that you use to create and manage an Amazon Rekognition Custom Labels model or custom adapter. You can specify a feature to create the project with; if no feature is specified, Custom Labels is used by default. For adapters, you can also choose whether to have the project auto-update by using the AutoUpdate argument. This operation requires permissions to perform the rekognition:CreateProject action. public func createProject(_ input: CreateProjectRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateProject", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project.
For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. Training takes a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start the model by calling StartProjectVersion. This operation requires permissions to perform the rekognition:CreateProjectVersion action. + /// Creates a new version of an Amazon Rekognition project (like a Custom Labels model or a custom adapter) and begins training. Models and adapters are managed as part of a Rekognition project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the project version. The FeatureConfig operation argument allows you to configure specific model or adapter settings. You can provide a description for the project version by using the VersionDescription argument. Training can take a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. This operation requires permissions to perform the rekognition:CreateProjectVersion action. The following applies only to projects with Amazon Rekognition Custom Labels as the chosen feature: You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. public func createProjectVersion(_ input: CreateProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) -> EventLoopFuture { return self.client.execute(operation: "CreateProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -130,7 +130,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DeleteCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception. + /// This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception. /// You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS). This operation requires permissions to perform the rekognition:DeleteDataset action. public func deleteDataset(_ input: DeleteDatasetRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -141,17 +141,17 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DeleteFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated with the project. To delete a model, see DeleteProjectVersion. DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project. This operation requires permissions to perform the rekognition:DeleteProject action. + /// Deletes an Amazon Rekognition project. To delete a project you must first delete all models or adapters associated with the project. To delete a model or adapter, see DeleteProjectVersion. DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project. This operation requires permissions to perform the rekognition:DeleteProject action. public func deleteProject(_ input: DeleteProjectRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteProject", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy.
This operation requires permissions to perform the rekognition:DeleteProjectPolicy action. + /// This operation applies only to Amazon Rekognition Custom Labels. Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This operation requires permissions to perform the rekognition:DeleteProjectPolicy action. public func deleteProjectPolicy(_ input: DeleteProjectPolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteProjectPolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Deletes an Amazon Rekognition Custom Labels model. You can't delete a model if it is running or if it is training. To check the status of a model, use the Status field returned from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model is training, wait until it finishes. This operation requires permissions to perform the rekognition:DeleteProjectVersion action. + /// Deletes a Rekognition project model or project version, like an Amazon Rekognition Custom Labels model or a custom adapter. You can't delete a project version if it is running or if it is training. To check the status of a project version, use the Status field returned from DescribeProjectVersions. To stop a project version, call StopProjectVersion. If the project version is training, wait until it finishes. This operation requires permissions to perform the rekognition:DeleteProjectVersion action. public func deleteProjectVersion(_ input: DeleteProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -171,6 +171,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DescribeCollection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Describes an Amazon Rekognition Custom Labels dataset. You can get information such as the current status of a dataset and /// statistics about the images and labels in a dataset. /// This operation requires permissions to perform the rekognition:DescribeDataset action. @@ -178,12 +179,12 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DescribeDataset", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. You can specify up to 10 model versions in ProjectVersionArns. If you don't specify a value, descriptions for all model versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. + /// Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions for all model/adapter versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action.
public func describeProjectVersions(_ input: DescribeProjectVersionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeProjectVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets information about your Amazon Rekognition Custom Labels projects. This operation requires permissions to perform the rekognition:DescribeProjects action. + /// Gets information about your Rekognition projects. This operation requires permissions to perform the rekognition:DescribeProjects action. public func describeProjects(_ input: DescribeProjectsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeProjects", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -193,7 +194,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DescribeStreamProcessor", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model. You specify which version of a model version to use by using the ProjectVersionArn input parameter. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. + /// This operation applies only to Amazon Rekognition Custom Labels. Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model. You specify which version of the model to use by using the ProjectVersionArn input parameter. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. /// The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1.
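For completeness, a sketch of the EventLoopFuture variants generated in Rekognition_api.swift, usable where async/await is not available; the same request shapes apply.

// Sketch only: EventLoopFuture style instead of async/await.
rekognition.describeProjects(Rekognition.DescribeProjectsRequest(maxResults: 10))
    .whenSuccess { response in
        for project in response.projectDescriptions ?? [] {
            print(project.projectArn ?? "?", project.status?.rawValue ?? "unknown")
        }
    }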
The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide. If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectCustomLabels action. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide. public func detectCustomLabels(_ input: DetectCustomLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DetectCustomLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -209,7 +210,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DetectLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content. To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate. For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. + /// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content. To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate. For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. You can specify an adapter to use when retrieving label predictions by providing a ProjectVersionArn to the ProjectVersion argument. public func detectModerationLabels(_ input: DetectModerationLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { return self.client.execute(operation: "DetectModerationLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -229,7 +230,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "DisassociateFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action. + /// This operation applies only to Amazon Rekognition Custom Labels. Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action. public func distributeDatasetEntries(_ input: DistributeDatasetEntriesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DistributeDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -294,6 +295,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "ListCollections", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Lists the entries (images) within a dataset. An entry is a /// JSON Line that contains the information for a single image, including /// the image location, assigned labels, and object location bounding boxes. For @@ -303,7 +305,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "ListDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. 
For more information, see Labeling images. + /// This operation applies only to Amazon Rekognition Custom Labels. Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide. public func listDatasetLabels(_ input: ListDatasetLabelsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListDatasetLabels", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -314,7 +316,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "ListFaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. + /// This operation applies only to Amazon Rekognition Custom Labels. Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. public func listProjectPolicies(_ input: ListProjectPoliciesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListProjectPolicies", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -334,7 +336,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "ListUsers", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Attaches a project policy to a Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action. + /// This operation applies only to Amazon Rekognition Custom Labels. Attaches a project policy to a Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. 
To copy a model version you use the CopyProjectVersion operation. Only applies to Custom Labels projects. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action. public func putProjectPolicy(_ input: PutProjectPolicyRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "PutProjectPolicy", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -394,7 +396,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "StartPersonTracking", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions. Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels. You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion. For more information, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide. This operation requires permissions to perform the rekognition:StartProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions. Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels. You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion. This operation requires permissions to perform the rekognition:StartProjectVersion action. public func startProjectVersion(_ input: StartProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "StartProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -414,7 +416,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "StartTextDetection", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. This operation requires permissions to perform the rekognition:StopProjectVersion action. + /// This operation applies only to Amazon Rekognition Custom Labels. Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. Only applies to Custom Labels projects. 
This operation requires permissions to perform the rekognition:StopProjectVersion action. public func stopProjectVersion(_ input: StopProjectVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "StopProjectVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -434,7 +436,7 @@ public struct Rekognition: AWSService { return self.client.execute(operation: "UntagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If source-ref field doesn't reference an existing image, the image is added as a new image to the dataset. You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number JSON Lines that you can change, but the size of Changes must be less + /// This operation applies only to Amazon Rekognition Custom Labels. Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If source-ref field doesn't reference an existing image, the image is added as a new image to the dataset. You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number JSON Lines that you can change, but the size of Changes must be less /// than 5MB. UpdateDatasetEntries returns immediatly, but the dataset update might take a while to complete. Use DescribeDataset to check the current status. The dataset updated successfully if the value of Status is UPDATE_COMPLETE. To check if any non-terminal errors occured, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK. This operation requires permissions to perform the rekognition:UpdateDatasetEntries action. public func updateDatasetEntries(_ input: UpdateDatasetEntriesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "UpdateDatasetEntries", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -458,7 +460,7 @@ extension Rekognition { // MARK: Paginators extension Rekognition { - /// Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. You can specify up to 10 model versions in ProjectVersionArns. 
If you don't specify a value, descriptions for all model versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. + /// Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions for all model/adapter versions in the project are returned. This operation requires permissions to perform the rekognition:DescribeProjectVersions action. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. @@ -511,7 +513,7 @@ extension Rekognition { ) } - /// Gets information about your Amazon Rekognition Custom Labels projects. This operation requires permissions to perform the rekognition:DescribeProjects action. + /// Gets information about your Rekognition projects. This operation requires permissions to perform the rekognition:DescribeProjects action. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. @@ -1041,6 +1043,7 @@ extension Rekognition { ) } + /// This operation applies only to Amazon Rekognition Custom Labels. /// Lists the entries (images) within a dataset. An entry is a /// JSON Line that contains the information for a single image, including /// the image location, assigned labels, and object location bounding boxes. For @@ -1098,7 +1101,7 @@ extension Rekognition { ) } - /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. + /// This operation applies only to Amazon Rekognition Custom Labels. Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images. /// Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. @@ -1205,7 +1208,7 @@ extension Rekognition { ) } - /// Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. + /// This operation applies only to Amazon Rekognition Custom Labels. Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. 
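// A minimal sketch of driving the DescribeProjectVersions paginator documented above to
// enumerate a project's model or adapter versions. The client setup and the project ARN
// are illustrative assumptions, not values taken from this patch.
import SotoRekognition

let client = AWSClient(httpClientProvider: .createNew)
let rekognition = Rekognition(client: client, region: .useast1)

let describeRequest = Rekognition.DescribeProjectVersionsRequest(
    maxResults: 10,
    projectArn: "arn:aws:rekognition:us-east-1:123456789012:project/my-project/1234567890123"  // hypothetical ARN
)

// Each page is handed to the closure; returning true asks for the next page.
let allPages = rekognition.describeProjectVersionsPaginator(describeRequest) { response, eventLoop in
    for version in response.projectVersionDescriptions ?? [] {
        print(version.projectVersionArn ?? "unknown", version.versionDescription ?? "")
    }
    return eventLoop.makeSucceededFuture(true)
}
try allPages.wait()
try client.syncShutdown()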
@@ -1379,6 +1382,7 @@ extension Rekognition.DescribeProjectVersionsRequest: AWSPaginateToken { extension Rekognition.DescribeProjectsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Rekognition.DescribeProjectsRequest { return .init( + features: self.features, maxResults: self.maxResults, nextToken: token, projectNames: self.projectNames diff --git a/Sources/Soto/Services/Rekognition/Rekognition_shapes.swift b/Sources/Soto/Services/Rekognition/Rekognition_shapes.swift index 38720d3828..e67b39f5da 100644 --- a/Sources/Soto/Services/Rekognition/Rekognition_shapes.swift +++ b/Sources/Soto/Services/Rekognition/Rekognition_shapes.swift @@ -76,6 +76,12 @@ extension Rekognition { public var description: String { return self.rawValue } } + public enum CustomizationFeature: String, CustomStringConvertible, Codable, Sendable { + case contentModeration = "CONTENT_MODERATION" + case customLabels = "CUSTOM_LABELS" + public var description: String { return self.rawValue } + } + public enum DatasetStatus: String, CustomStringConvertible, Codable, Sendable { case createComplete = "CREATE_COMPLETE" case createFailed = "CREATE_FAILED" @@ -219,6 +225,12 @@ extension Rekognition { public var description: String { return self.rawValue } } + public enum ProjectAutoUpdate: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ProjectStatus: String, CustomStringConvertible, Codable, Sendable { case created = "CREATED" case creating = "CREATING" @@ -231,6 +243,8 @@ extension Rekognition { case copyingFailed = "COPYING_FAILED" case copyingInProgress = "COPYING_IN_PROGRESS" case deleting = "DELETING" + case deprecated = "DEPRECATED" + case expired = "EXPIRED" case failed = "FAILED" case running = "RUNNING" case starting = "STARTING" @@ -1138,10 +1152,16 @@ extension Rekognition { } public struct CreateProjectRequest: AWSEncodableShape { + /// Specifies whether automatic retraining should be attempted for the versions of the project. Automatic retraining is done as a best effort. Required argument for Content Moderation. Applicable only to adapters. + public let autoUpdate: ProjectAutoUpdate? + /// Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default. + public let feature: CustomizationFeature? /// The name of the project to create. public let projectName: String - public init(projectName: String) { + public init(autoUpdate: ProjectAutoUpdate? = nil, feature: CustomizationFeature? = nil, projectName: String) { + self.autoUpdate = autoUpdate + self.feature = feature self.projectName = projectName } @@ -1152,6 +1172,8 @@ extension Rekognition { } private enum CodingKeys: String, CodingKey { + case autoUpdate = "AutoUpdate" + case feature = "Feature" case projectName = "ProjectName" } } @@ -1170,32 +1192,39 @@ extension Rekognition { } public struct CreateProjectVersionRequest: AWSEncodableShape { - /// The identifier for your AWS Key Management Service key (AWS KMS key). You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key is used to encrypt training and test images copied into the service for model training. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig). 
If you choose to use your own KMS key, you need the following permissions on the KMS key. kms:CreateGrant kms:DescribeKey kms:GenerateDataKey kms:Decrypt If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages. + /// Feature-specific configuration of the training job. If the job configuration does not match the feature type associated with the project, an InvalidParameterException is returned. + public let featureConfig: CustomizationFeatureConfig? + /// The identifier for your AWS Key Management Service key (AWS KMS key). You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key is used to encrypt training images, test images, and manifest files copied into the service for the project version. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig). If you choose to use your own KMS key, you need the following permissions on the KMS key. kms:CreateGrant kms:DescribeKey kms:GenerateDataKey kms:Decrypt If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages. public let kmsKeyId: String? - /// The Amazon S3 bucket location to store the results of training. The S3 bucket can be in any AWS account as long as the caller has s3:PutObject permissions on the S3 bucket. + /// The Amazon S3 bucket location to store the results of training. The bucket can be any S3 bucket in your AWS account. You need s3:PutObject permission on the bucket. public let outputConfig: OutputConfig - /// The ARN of the Amazon Rekognition Custom Labels project that manages the model that you want to train. + /// The ARN of the Amazon Rekognition project that will manage the project version you want to train. public let projectArn: String - /// A set of tags (key-value pairs) that you want to attach to the model. + /// A set of tags (key-value pairs) that you want to attach to the project version. public let tags: [String: String]? - /// Specifies an external manifest that the service uses to test the model. If you specify TestingData you must also specify TrainingData. The project must not have any associated datasets. + /// Specifies an external manifest that the service uses to test the project version. If you specify TestingData you must also specify TrainingData. The project must not have any associated datasets. public let testingData: TestingData? - /// Specifies an external manifest that the services uses to train the model. If you specify TrainingData you must also specify TestingData. The project must not have any associated datasets. + /// Specifies an external manifest that the services uses to train the project version. If you specify TrainingData you must also specify TestingData. The project must not have any associated datasets. public let trainingData: TrainingData? - /// A name for the version of the model. This value must be unique. + /// A description applied to the project version being created. + public let versionDescription: String? + /// A name for the version of the project version. This value must be unique. public let versionName: String - public init(kmsKeyId: String? = nil, outputConfig: OutputConfig, projectArn: String, tags: [String: String]? = nil, testingData: TestingData? = nil, trainingData: TrainingData? 
= nil, versionName: String) { + public init(featureConfig: CustomizationFeatureConfig? = nil, kmsKeyId: String? = nil, outputConfig: OutputConfig, projectArn: String, tags: [String: String]? = nil, testingData: TestingData? = nil, trainingData: TrainingData? = nil, versionDescription: String? = nil, versionName: String) { + self.featureConfig = featureConfig self.kmsKeyId = kmsKeyId self.outputConfig = outputConfig self.projectArn = projectArn self.tags = tags self.testingData = testingData self.trainingData = trainingData + self.versionDescription = versionDescription self.versionName = versionName } public func validate(name: String) throws { + try self.featureConfig?.validate(name: "\(name).featureConfig") try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1) try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$") @@ -1213,24 +1242,29 @@ extension Rekognition { try self.validate(self.tags, name: "tags", parent: name, max: 200) try self.testingData?.validate(name: "\(name).testingData") try self.trainingData?.validate(name: "\(name).trainingData") + try self.validate(self.versionDescription, name: "versionDescription", parent: name, max: 255) + try self.validate(self.versionDescription, name: "versionDescription", parent: name, min: 1) + try self.validate(self.versionDescription, name: "versionDescription", parent: name, pattern: "^[a-zA-Z0-9-_. ()':,;?]+$") try self.validate(self.versionName, name: "versionName", parent: name, max: 255) try self.validate(self.versionName, name: "versionName", parent: name, min: 1) try self.validate(self.versionName, name: "versionName", parent: name, pattern: "^[a-zA-Z0-9_.\\-]+$") } private enum CodingKeys: String, CodingKey { + case featureConfig = "FeatureConfig" case kmsKeyId = "KmsKeyId" case outputConfig = "OutputConfig" case projectArn = "ProjectArn" case tags = "Tags" case testingData = "TestingData" case trainingData = "TrainingData" + case versionDescription = "VersionDescription" case versionName = "VersionName" } } public struct CreateProjectVersionResponse: AWSDecodableShape { - /// The ARN of the model version that was created. Use DescribeProjectVersion to get the current status of the training operation. + /// The ARN of the model or the project version that was created. Use DescribeProjectVersion to get the current status of the training operation. public let projectVersionArn: String? public init(projectVersionArn: String? = nil) { @@ -1384,6 +1418,41 @@ extension Rekognition { } } + public struct CustomizationFeatureConfig: AWSEncodableShape & AWSDecodableShape { + /// Configuration options for Custom Moderation training. + public let contentModeration: CustomizationFeatureContentModerationConfig? + + public init(contentModeration: CustomizationFeatureContentModerationConfig? = nil) { + self.contentModeration = contentModeration + } + + public func validate(name: String) throws { + try self.contentModeration?.validate(name: "\(name).contentModeration") + } + + private enum CodingKeys: String, CodingKey { + case contentModeration = "ContentModeration" + } + } + + public struct CustomizationFeatureContentModerationConfig: AWSEncodableShape & AWSDecodableShape { + /// The confidence level you plan to use to identify if unsafe content is present during inference. + public let confidenceThreshold: Float? + + public init(confidenceThreshold: Float? 
= nil) { + self.confidenceThreshold = confidenceThreshold + } + + public func validate(name: String) throws { + try self.validate(self.confidenceThreshold, name: "confidenceThreshold", parent: name, max: 100.0) + try self.validate(self.confidenceThreshold, name: "confidenceThreshold", parent: name, min: 0.0) + } + + private enum CodingKeys: String, CodingKey { + case confidenceThreshold = "ConfidenceThreshold" + } + } + public struct DatasetChanges: AWSEncodableShape { /// A Base64-encoded binary data object containing one or JSON lines that either update the dataset or are additions to the dataset. You change a dataset by calling UpdateDatasetEntries. If you are using an AWS SDK to call UpdateDatasetEntries, you don't need to encode Changes as the SDK encodes the data for you. /// For example JSON lines, see Image-Level labels in manifest files and and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. @@ -1720,7 +1789,7 @@ extension Rekognition { } public struct DeleteProjectVersionRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the model version that you want to delete. + /// The Amazon Resource Name (ARN) of the project version that you want to delete. public let projectVersionArn: String public init(projectVersionArn: String) { @@ -1894,11 +1963,11 @@ extension Rekognition { public struct DescribeProjectVersionsRequest: AWSEncodableShape { /// The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100. public let maxResults: Int? - /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. + /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. public let nextToken: String? - /// The Amazon Resource Name (ARN) of the project that contains the models you want to describe. + /// The Amazon Resource Name (ARN) of the project that contains the model/adapter you want to describe. public let projectArn: String - /// A list of model version names that you want to describe. You can add up to 10 model version names to the list. If you don't specify a value, all model descriptions are returned. A version name is part of a model (ProjectVersion) ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123. + /// A list of model or project version names that you want to describe. You can add up to 10 model or project version names to the list. If you don't specify a value, all project version descriptions are returned. A version name is part of a project version ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123. public let versionNames: [String]? public init(maxResults: Int? = nil, nextToken: String? = nil, projectArn: String, versionNames: [String]? 
= nil) { @@ -1933,9 +2002,9 @@ extension Rekognition { } public struct DescribeProjectVersionsResponse: AWSDecodableShape { - /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. + /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. public let nextToken: String? - /// A list of model descriptions. The list is sorted by the creation date and time of the model versions, latest to earliest. + /// A list of project version descriptions. The list is sorted by the creation date and time of the project versions, latest to earliest. public let projectVersionDescriptions: [ProjectVersionDescription]? public init(nextToken: String? = nil, projectVersionDescriptions: [ProjectVersionDescription]? = nil) { @@ -1950,20 +2019,25 @@ extension Rekognition { } public struct DescribeProjectsRequest: AWSEncodableShape { + /// Specifies the type of customization to filter projects by. If no value is specified, CUSTOM_LABELS is used as a default. + public let features: [CustomizationFeature]? /// The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100. public let maxResults: Int? - /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. + /// If the previous response was incomplete (because there is more results to retrieve), Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. public let nextToken: String? - /// A list of the projects that you want Amazon Rekognition Custom Labels to describe. If you don't specify a value, the response includes descriptions for all the projects in your AWS account. + /// A list of the projects that you want Rekognition to describe. If you don't specify a value, the response includes descriptions for all the projects in your AWS account. public let projectNames: [String]? - public init(maxResults: Int? = nil, nextToken: String? = nil, projectNames: [String]? = nil) { + public init(features: [CustomizationFeature]? = nil, maxResults: Int? = nil, nextToken: String? = nil, projectNames: [String]? 
= nil) { + self.features = features self.maxResults = maxResults self.nextToken = nextToken self.projectNames = projectNames } public func validate(name: String) throws { + try self.validate(self.features, name: "features", parent: name, max: 2) + try self.validate(self.features, name: "features", parent: name, min: 1) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) @@ -1977,6 +2051,7 @@ extension Rekognition { } private enum CodingKeys: String, CodingKey { + case features = "Features" case maxResults = "MaxResults" case nextToken = "NextToken" case projectNames = "ProjectNames" @@ -1984,7 +2059,7 @@ extension Rekognition { } public struct DescribeProjectsResponse: AWSDecodableShape { - /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. + /// If the previous response was incomplete (because there is more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. public let nextToken: String? /// A list of project descriptions. The list is sorted by the date and time the projects are created. public let projectDescriptions: [ProjectDescription]? @@ -2089,7 +2164,7 @@ extension Rekognition { public let maxResults: Int? /// Specifies the minimum confidence level for the labels to return. DetectCustomLabels doesn't return any labels with a confidence value that's lower than this specified value. If you specify a value of 0, DetectCustomLabels returns all labels, regardless of the assumed threshold applied to each label. If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label. public let minConfidence: Float? - /// The ARN of the model version that you want to use. + /// The ARN of the model version that you want to use. Only models associated with Custom Labels projects accepted by the operation. If a provided ARN refers to a model version associated with a project for a different feature type, then an InvalidParameterException is returned. public let projectVersionArn: String public init(image: Image, maxResults: Int? = nil, minConfidence: Float? = nil, projectVersionArn: String) { @@ -2358,11 +2433,14 @@ extension Rekognition { public let image: Image /// Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with a confidence level lower than this specified value. If you don't specify MinConfidence, the operation returns labels with confidence values greater than or equal to 50 percent. public let minConfidence: Float? + /// Identifier for the custom adapter. Expects the ProjectVersionArn as a value. Use the CreateProject or CreateProjectVersion APIs to create a custom adapter. + public let projectVersion: String? - public init(humanLoopConfig: HumanLoopConfig? = nil, image: Image, minConfidence: Float? = nil) { + public init(humanLoopConfig: HumanLoopConfig? = nil, image: Image, minConfidence: Float? = nil, projectVersion: String? 
= nil) { self.humanLoopConfig = humanLoopConfig self.image = image self.minConfidence = minConfidence + self.projectVersion = projectVersion } public func validate(name: String) throws { @@ -2370,12 +2448,16 @@ extension Rekognition { try self.image.validate(name: "\(name).image") try self.validate(self.minConfidence, name: "minConfidence", parent: name, max: 100.0) try self.validate(self.minConfidence, name: "minConfidence", parent: name, min: 0.0) + try self.validate(self.projectVersion, name: "projectVersion", parent: name, max: 2048) + try self.validate(self.projectVersion, name: "projectVersion", parent: name, min: 20) + try self.validate(self.projectVersion, name: "projectVersion", parent: name, pattern: "^(^arn:[a-z\\d-]+:rekognition:[a-z\\d-]+:\\d{12}:project\\/[a-zA-Z0-9_.\\-]{1,255}\\/version\\/[a-zA-Z0-9_.\\-]{1,255}\\/[0-9]+$)$") } private enum CodingKeys: String, CodingKey { case humanLoopConfig = "HumanLoopConfig" case image = "Image" case minConfidence = "MinConfidence" + case projectVersion = "ProjectVersion" } } @@ -2384,19 +2466,23 @@ extension Rekognition { public let humanLoopActivationOutput: HumanLoopActivationOutput? /// Array of detected Moderation labels and the time, in milliseconds from the start of the video, they were detected. public let moderationLabels: [ModerationLabel]? - /// Version number of the moderation detection model that was used to detect unsafe content. + /// Version number of the base moderation detection model that was used to detect unsafe content. public let moderationModelVersion: String? + /// Identifier of the custom adapter that was used during inference. If during inference the adapter was EXPIRED, then the parameter will not be returned, indicating that a base moderation detection project version was used. + public let projectVersion: String? - public init(humanLoopActivationOutput: HumanLoopActivationOutput? = nil, moderationLabels: [ModerationLabel]? = nil, moderationModelVersion: String? = nil) { + public init(humanLoopActivationOutput: HumanLoopActivationOutput? = nil, moderationLabels: [ModerationLabel]? = nil, moderationModelVersion: String? = nil, projectVersion: String? = nil) { self.humanLoopActivationOutput = humanLoopActivationOutput self.moderationLabels = moderationLabels self.moderationModelVersion = moderationModelVersion + self.projectVersion = projectVersion } private enum CodingKeys: String, CodingKey { case humanLoopActivationOutput = "HumanLoopActivationOutput" case moderationLabels = "ModerationLabels" case moderationModelVersion = "ModerationModelVersion" + case projectVersion = "ProjectVersion" } } @@ -4878,25 +4964,33 @@ extension Rekognition { } public struct ProjectDescription: AWSDecodableShape { + /// Indicates whether automatic retraining will be attempted for the versions of the project. Applies only to adapters. + public let autoUpdate: ProjectAutoUpdate? /// The Unix timestamp for the date and time that the project was created. public let creationTimestamp: Date? /// Information about the training and test datasets in the project. public let datasets: [DatasetMetadata]? + /// Specifies the project that is being customized. + public let feature: CustomizationFeature? /// The Amazon Resource Name (ARN) of the project. public let projectArn: String? /// The current status of the project. public let status: ProjectStatus? - public init(creationTimestamp: Date? = nil, datasets: [DatasetMetadata]? = nil, projectArn: String? = nil, status: ProjectStatus? = nil) { + public init(autoUpdate: ProjectAutoUpdate? 
= nil, creationTimestamp: Date? = nil, datasets: [DatasetMetadata]? = nil, feature: CustomizationFeature? = nil, projectArn: String? = nil, status: ProjectStatus? = nil) { + self.autoUpdate = autoUpdate self.creationTimestamp = creationTimestamp self.datasets = datasets + self.feature = feature self.projectArn = projectArn self.status = status } private enum CodingKeys: String, CodingKey { + case autoUpdate = "AutoUpdate" case creationTimestamp = "CreationTimestamp" case datasets = "Datasets" + case feature = "Feature" case projectArn = "ProjectArn" case status = "Status" } @@ -4936,23 +5030,29 @@ extension Rekognition { } public struct ProjectVersionDescription: AWSDecodableShape { + /// The base detection model version used to create the project version. + public let baseModelVersion: String? /// The duration, in seconds, that you were billed for a successful training of the model version. This value is only returned if the model version has been successfully trained. public let billableTrainingTimeInSeconds: Int64? /// The Unix datetime for the date and time that training started. public let creationTimestamp: Date? /// The training results. EvaluationResult is only returned if training is successful. public let evaluationResult: EvaluationResult? + /// The feature that was customized. + public let feature: CustomizationFeature? + /// Feature specific configuration that was applied during training. + public let featureConfig: CustomizationFeatureConfig? /// The identifer for the AWS Key Management Service key (AWS KMS key) that was used to encrypt the model during training. public let kmsKeyId: String? /// The location of the summary manifest. The summary manifest provides aggregate data validation results for the training and test datasets. public let manifestSummary: GroundTruthManifest? - /// The maximum number of inference units Amazon Rekognition Custom Labels uses to auto-scale the model. For more information, see StartProjectVersion. + /// The maximum number of inference units Amazon Rekognition uses to auto-scale the model. Applies only to Custom Labels projects. For more information, see StartProjectVersion. public let maxInferenceUnits: Int? - /// The minimum number of inference units used by the model. For more information, see StartProjectVersion. + /// The minimum number of inference units used by the model. Applies only to Custom Labels projects. For more information, see StartProjectVersion. public let minInferenceUnits: Int? /// The location where training results are saved. public let outputConfig: OutputConfig? - /// The Amazon Resource Name (ARN) of the model version. + /// The Amazon Resource Name (ARN) of the project version. public let projectVersionArn: String? /// If the model version was copied from a different project, SourceProjectVersionArn contains the ARN of the source model version. public let sourceProjectVersionArn: String? @@ -4966,11 +5066,16 @@ extension Rekognition { public let trainingDataResult: TrainingDataResult? /// The Unix date and time that training of the model ended. public let trainingEndTimestamp: Date? + /// A user-provided description of the project version. + public let versionDescription: String? - public init(billableTrainingTimeInSeconds: Int64? = nil, creationTimestamp: Date? = nil, evaluationResult: EvaluationResult? = nil, kmsKeyId: String? = nil, manifestSummary: GroundTruthManifest? = nil, maxInferenceUnits: Int? = nil, minInferenceUnits: Int? = nil, outputConfig: OutputConfig? = nil, projectVersionArn: String? 
= nil, sourceProjectVersionArn: String? = nil, status: ProjectVersionStatus? = nil, statusMessage: String? = nil, testingDataResult: TestingDataResult? = nil, trainingDataResult: TrainingDataResult? = nil, trainingEndTimestamp: Date? = nil) { + public init(baseModelVersion: String? = nil, billableTrainingTimeInSeconds: Int64? = nil, creationTimestamp: Date? = nil, evaluationResult: EvaluationResult? = nil, feature: CustomizationFeature? = nil, featureConfig: CustomizationFeatureConfig? = nil, kmsKeyId: String? = nil, manifestSummary: GroundTruthManifest? = nil, maxInferenceUnits: Int? = nil, minInferenceUnits: Int? = nil, outputConfig: OutputConfig? = nil, projectVersionArn: String? = nil, sourceProjectVersionArn: String? = nil, status: ProjectVersionStatus? = nil, statusMessage: String? = nil, testingDataResult: TestingDataResult? = nil, trainingDataResult: TrainingDataResult? = nil, trainingEndTimestamp: Date? = nil, versionDescription: String? = nil) { + self.baseModelVersion = baseModelVersion self.billableTrainingTimeInSeconds = billableTrainingTimeInSeconds self.creationTimestamp = creationTimestamp self.evaluationResult = evaluationResult + self.feature = feature + self.featureConfig = featureConfig self.kmsKeyId = kmsKeyId self.manifestSummary = manifestSummary self.maxInferenceUnits = maxInferenceUnits @@ -4983,12 +5088,16 @@ extension Rekognition { self.testingDataResult = testingDataResult self.trainingDataResult = trainingDataResult self.trainingEndTimestamp = trainingEndTimestamp + self.versionDescription = versionDescription } private enum CodingKeys: String, CodingKey { + case baseModelVersion = "BaseModelVersion" case billableTrainingTimeInSeconds = "BillableTrainingTimeInSeconds" case creationTimestamp = "CreationTimestamp" case evaluationResult = "EvaluationResult" + case feature = "Feature" + case featureConfig = "FeatureConfig" case kmsKeyId = "KmsKeyId" case manifestSummary = "ManifestSummary" case maxInferenceUnits = "MaxInferenceUnits" @@ -5001,6 +5110,7 @@ extension Rekognition { case testingDataResult = "TestingDataResult" case trainingDataResult = "TrainingDataResult" case trainingEndTimestamp = "TrainingEndTimestamp" + case versionDescription = "VersionDescription" } } @@ -5991,7 +6101,7 @@ extension Rekognition { public struct StartProjectVersionRequest: AWSEncodableShape { /// The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Rekognition Custom Labels doesn't auto-scale the model. public let maxInferenceUnits: Int? - /// The minimum number of inference units to use. A single inference unit represents 1 hour of processing. For information about the number of transactions per second (TPS) that an inference unit can support, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. + /// The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. public let minInferenceUnits: Int /// The Amazon Resource Name(ARN) of the model version that you want to start. 
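// A minimal sketch of combining the new CreateProjectRequest and CreateProjectVersionRequest
// fields shown earlier in this file's diff to train a content moderation adapter. The project
// name, output bucket, and confidence threshold are illustrative assumptions; the `rekognition`
// client from the earlier sketch is reused.
let projectFuture = rekognition.createProject(
    .init(autoUpdate: .enabled, feature: .contentModeration, projectName: "my-moderation-adapter")
)
let adapterVersionArn = projectFuture.flatMap { project in
    rekognition.createProjectVersion(.init(
        featureConfig: .init(contentModeration: .init(confidenceThreshold: 60)),
        outputConfig: .init(s3Bucket: "my-training-output", s3KeyPrefix: "adapters/"),  // hypothetical bucket
        projectArn: project.projectArn ?? "",
        versionDescription: "First adapter version",
        versionName: "adapter-v1"
    ))
}.map(\.projectVersionArn)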
public let projectVersionArn: String @@ -6268,7 +6378,7 @@ extension Rekognition { } public struct StopProjectVersionRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the model version that you want to delete. This operation requires permissions to perform the rekognition:StopProjectVersion action. + /// The Amazon Resource Name (ARN) of the model version that you want to stop. This operation requires permissions to perform the rekognition:StopProjectVersion action. public let projectVersionArn: String public init(projectVersionArn: String) { @@ -6564,7 +6674,7 @@ extension Rekognition { public struct TestingData: AWSEncodableShape & AWSDecodableShape { /// The assets used for testing. public let assets: [Asset]? - /// If specified, Amazon Rekognition Custom Labels temporarily splits the training dataset (80%) to create a test dataset (20%) for the training job. After training completes, the test dataset is not stored and the training dataset reverts to its previous size. + /// If specified, Rekognition splits training dataset to create a test dataset for the training job. public let autoCreate: Bool? public init(assets: [Asset]? = nil, autoCreate: Bool? = nil) { @@ -6656,7 +6766,7 @@ extension Rekognition { } public struct TrainingData: AWSEncodableShape & AWSDecodableShape { - /// A Sagemaker GroundTruth manifest file that contains the training images (assets). + /// A manifest file that contains references to the training images and ground-truth annotations. public let assets: [Asset]? public init(assets: [Asset]? = nil) { @@ -6675,11 +6785,11 @@ extension Rekognition { } public struct TrainingDataResult: AWSDecodableShape { - /// The training assets that you supplied for training. + /// The training data that you supplied. public let input: TrainingData? - /// The images (assets) that were actually trained by Amazon Rekognition Custom Labels. + /// Reference to images (assets) that were actually used during training with trained model predictions. public let output: TrainingData? - /// The location of the data validation manifest. The data validation manifest is created for the training dataset during model training. + /// A manifest that you supplied for training, with validation results for each line. public let validation: ValidationData? public init(input: TrainingData? = nil, output: TrainingData? = nil, validation: ValidationData? = nil) { @@ -7067,7 +7177,7 @@ public struct RekognitionErrorType: AWSErrorType { public static var invalidPolicyRevisionIdException: Self { .init(.invalidPolicyRevisionIdException) } /// Amazon Rekognition is unable to access the S3 object specified in the request. public static var invalidS3ObjectException: Self { .init(.invalidS3ObjectException) } - /// An Amazon Rekognition service limit was exceeded. For example, if you start too many Amazon Rekognition Video jobs concurrently, calls to start operations (StartLabelDetection, for example) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit. + /// An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs concurrently, subsequent calls to start operations (ex: StartLabelDetection) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit. 
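// A minimal sketch of passing the new ProjectVersion argument to DetectModerationLabels so that
// inference runs through a custom moderation adapter. The adapter ARN and the S3 object are
// illustrative assumptions; the `rekognition` client from the earlier sketch is reused.
let adapterArn = "arn:aws:rekognition:us-east-1:123456789012:project/my-moderation-adapter/version/adapter-v1/1698765432100"  // hypothetical ARN
let moderationRequest = Rekognition.DetectModerationLabelsRequest(
    image: .init(s3Object: .init(bucket: "my-images", name: "upload.jpg")),
    minConfidence: 50,
    projectVersion: adapterArn
)
let moderationLabels = rekognition.detectModerationLabels(moderationRequest).map { response in
    response.moderationLabels ?? []
}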
public static var limitExceededException: Self { .init(.limitExceededException) } /// The format of the project policy document that you supplied to PutProjectPolicy is incorrect. public static var malformedPolicyDocumentException: Self { .init(.malformedPolicyDocumentException) } diff --git a/Sources/Soto/Services/Route53/Route53_api+async.swift b/Sources/Soto/Services/Route53/Route53_api+async.swift index 7535d33542..f675617499 100644 --- a/Sources/Soto/Services/Route53/Route53_api+async.swift +++ b/Sources/Soto/Services/Route53/Route53_api+async.swift @@ -79,8 +79,8 @@ extension Route53 { /// Guide. Create, Delete, and Upsert Use ChangeResourceRecordsSetsRequest to perform the following /// actions: CREATE: Creates a resource record set that has the specified /// values. DELETE: Deletes an existing resource record set that has the - /// specified values. UPSERT: If a resource set exists Route 53 updates it with the - /// values in the request. Syntaxes for Creating, Updating, and Deleting Resource Record + /// specified values. UPSERT: If a resource set doesn't exist, Route 53 creates it. If a resource + /// set exists Route 53 updates it with the values in the request. Syntaxes for Creating, Updating, and Deleting Resource Record /// Sets The syntax for a request depends on the type of resource record set that you want to /// create, delete, or update, such as weighted, alias, or failover. The XML elements in /// your request must appear in the order listed in the syntax. For an example for each type of resource record set, see "Examples." Don't refer to the syntax in the "Parameter Syntax" section, which includes @@ -269,7 +269,12 @@ extension Route53 { /// associates the resource record sets with a specified domain name (such as example.com) /// or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for /// the domain or subdomain name by using the resource record sets that - /// CreateTrafficPolicyInstance created. + /// CreateTrafficPolicyInstance created. After you submit an CreateTrafficPolicyInstance request, there's a + /// brief delay while Amazon Route 53 creates the resource record sets that are + /// specified in the traffic policy definition. + /// Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance + /// request completed successfully. For more information, see the + /// State response element. public func createTrafficPolicyInstance(_ input: CreateTrafficPolicyInstanceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateTrafficPolicyInstanceResponse { return try await self.client.execute(operation: "CreateTrafficPolicyInstance", path: "/2013-04-01/trafficpolicyinstance", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -550,10 +555,10 @@ extension Route53 { return try await self.client.execute(operation: "GetTrafficPolicy", path: "/2013-04-01/trafficpolicy/{Id}/{Version}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets information about a specified traffic policy instance. After you submit a CreateTrafficPolicyInstance or an - /// UpdateTrafficPolicyInstance request, there's a brief delay while - /// Amazon Route 53 creates the resource record sets that are specified in the traffic - /// policy definition. For more information, see the State response + /// Gets information about a specified traffic policy instance. 
+ /// Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the + /// CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request completed successfully. + /// For more information, see the State response /// element. In the Route 53 console, traffic policy instances are known as policy /// records. public func getTrafficPolicyInstance(_ input: GetTrafficPolicyInstanceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetTrafficPolicyInstanceResponse { @@ -796,7 +801,10 @@ extension Route53 { return try await self.client.execute(operation: "UpdateTrafficPolicyComment", path: "/2013-04-01/trafficpolicy/{Id}/{Version}", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Updates the resource record sets in a specified hosted zone that were created based on + /// After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets + /// that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance confirm + /// that the + /// UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element. Updates the resource record sets in a specified hosted zone that were created based on /// the settings in a specified traffic policy version. When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS /// queries for the root resource record set name (such as example.com) while it replaces /// one group of resource record sets with another. Route 53 performs the following diff --git a/Sources/Soto/Services/Route53/Route53_api.swift b/Sources/Soto/Services/Route53/Route53_api.swift index 635abb9477..ee7aa2df8d 100644 --- a/Sources/Soto/Services/Route53/Route53_api.swift +++ b/Sources/Soto/Services/Route53/Route53_api.swift @@ -141,8 +141,8 @@ public struct Route53: AWSService { /// Guide. Create, Delete, and Upsert Use ChangeResourceRecordsSetsRequest to perform the following /// actions: CREATE: Creates a resource record set that has the specified /// values. DELETE: Deletes an existing resource record set that has the - /// specified values. UPSERT: If a resource set exists Route 53 updates it with the - /// values in the request. Syntaxes for Creating, Updating, and Deleting Resource Record + /// specified values. UPSERT: If a resource set doesn't exist, Route 53 creates it. If a resource + /// set exists Route 53 updates it with the values in the request. Syntaxes for Creating, Updating, and Deleting Resource Record /// Sets The syntax for a request depends on the type of resource record set that you want to /// create, delete, or update, such as weighted, alias, or failover. The XML elements in /// your request must appear in the order listed in the syntax. For an example for each type of resource record set, see "Examples." Don't refer to the syntax in the "Parameter Syntax" section, which includes @@ -331,7 +331,12 @@ public struct Route53: AWSService { /// associates the resource record sets with a specified domain name (such as example.com) /// or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for /// the domain or subdomain name by using the resource record sets that - /// CreateTrafficPolicyInstance created. + /// CreateTrafficPolicyInstance created. 
After you submit an CreateTrafficPolicyInstance request, there's a + /// brief delay while Amazon Route 53 creates the resource record sets that are + /// specified in the traffic policy definition. + /// Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance + /// request completed successfully. For more information, see the + /// State response element. public func createTrafficPolicyInstance(_ input: CreateTrafficPolicyInstanceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateTrafficPolicyInstance", path: "/2013-04-01/trafficpolicyinstance", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -612,10 +617,10 @@ public struct Route53: AWSService { return self.client.execute(operation: "GetTrafficPolicy", path: "/2013-04-01/trafficpolicy/{Id}/{Version}", httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets information about a specified traffic policy instance. After you submit a CreateTrafficPolicyInstance or an - /// UpdateTrafficPolicyInstance request, there's a brief delay while - /// Amazon Route 53 creates the resource record sets that are specified in the traffic - /// policy definition. For more information, see the State response + /// Gets information about a specified traffic policy instance. + /// Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the + /// CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request completed successfully. + /// For more information, see the State response /// element. In the Route 53 console, traffic policy instances are known as policy /// records. public func getTrafficPolicyInstance(_ input: GetTrafficPolicyInstanceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { @@ -858,7 +863,10 @@ public struct Route53: AWSService { return self.client.execute(operation: "UpdateTrafficPolicyComment", path: "/2013-04-01/trafficpolicy/{Id}/{Version}", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Updates the resource record sets in a specified hosted zone that were created based on + /// After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets + /// that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance confirm + /// that the + /// UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element. Updates the resource record sets in a specified hosted zone that were created based on /// the settings in a specified traffic policy version. When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS /// queries for the root resource record set name (such as example.com) while it replaces /// one group of resource record sets with another. 
Route 53 performs the following @@ -1258,6 +1266,7 @@ extension Route53.ListHostedZonesRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Route53.ListHostedZonesRequest { return .init( delegationSetId: self.delegationSetId, + hostedZoneType: self.hostedZoneType, marker: token, maxItems: self.maxItems ) diff --git a/Sources/Soto/Services/Route53/Route53_shapes.swift b/Sources/Soto/Services/Route53/Route53_shapes.swift index a832355008..efb374bfa3 100644 --- a/Sources/Soto/Services/Route53/Route53_shapes.swift +++ b/Sources/Soto/Services/Route53/Route53_shapes.swift @@ -131,6 +131,11 @@ extension Route53 { public var description: String { return self.rawValue } } + public enum HostedZoneType: String, CustomStringConvertible, Codable, Sendable { + case privateHostedZone = "PrivateHostedZone" + public var description: String { return self.rawValue } + } + public enum InsufficientDataHealthStatus: String, CustomStringConvertible, Codable, Sendable { case healthy = "Healthy" case lastKnownStatus = "LastKnownStatus" @@ -1073,7 +1078,8 @@ extension Route53 { /// CallerReference as an existing health check but with different /// settings, Route 53 returns a HealthCheckAlreadyExists error. If you send a CreateHealthCheck request with a unique /// CallerReference but settings identical to an existing health - /// check, Route 53 creates the health check. + /// check, Route 53 creates the health check. Route 53 does not store the CallerReference for a deleted health check indefinitely. + /// The CallerReference for a deleted health check will be deleted after a number of days. public let callerReference: String /// A complex type that contains settings for a new health check. public let healthCheckConfig: HealthCheckConfig @@ -2060,7 +2066,7 @@ extension Route53 { /// or SubdivisionCode returns an InvalidInput error. public let continentCode: String? /// For geolocation resource record sets, the two-letter code for a country. Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 - /// alpha-2. + /// alpha-2. Route 53 also supports the country code UA for Ukraine. public let countryCode: String? /// For geolocation resource record sets, the two-letter code for a state of the United /// States. Route 53 doesn't support any other values for SubdivisionCode. For @@ -2277,7 +2283,7 @@ extension Route53 { /// continent. Amazon Route 53 supports the following continent codes: AF: Africa AN: Antarctica AS: Asia EU: Europe OC: Oceania NA: North America SA: South America public let continentCode: String? /// Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 - /// alpha-2. + /// alpha-2. Route 53 also supports the country code UA for Ukraine. public let countryCode: String? /// The code for the subdivision, such as a particular state within the United States. For /// a list of US state abbreviations, see Appendix B: Two–Letter State and @@ -3618,9 +3624,9 @@ extension Route53 { /// false, there are no more health checks to get. public let marker: String? /// The maximum number of health checks that you want ListHealthChecks to - /// return in response to the current request. Amazon Route 53 returns a maximum of 100 - /// items. If you set MaxItems to a value greater than 100, Route 53 returns - /// only the first 100 health checks. + /// return in response to the current request. Amazon Route 53 returns a maximum of 1000 + /// items. 
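The new HostedZoneType enum and hostedZoneType query parameter above make it possible to ask ListHostedZones for private hosted zones only. A short sketch follows; the async listHostedZones call and the hostedZones/name response members are the pre-existing Soto surface, and only the hostedZoneType argument comes from this patch.

    import SotoRoute53

    // Sketch: list only private hosted zones via the new `hostedZoneType` query parameter.
    func printPrivateHostedZones(_ route53: Route53) async throws {
        let request = Route53.ListHostedZonesRequest(hostedZoneType: .privateHostedZone)
        let response = try await route53.listHostedZones(request)
        for zone in response.hostedZones {
            print("\(zone.name) (\(zone.id))")
        }
    }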
If you set MaxItems to a value greater than 1000, Route 53 returns + /// only the first 1000 health checks. public let maxItems: Int? public init(marker: String? = nil, maxItems: Int? = nil) { @@ -3850,6 +3856,7 @@ extension Route53 { public struct ListHostedZonesRequest: AWSEncodableShape { public static var _encoding = [ AWSMemberEncoding(label: "delegationSetId", location: .querystring("delegationsetid")), + AWSMemberEncoding(label: "hostedZoneType", location: .querystring("hostedzonetype")), AWSMemberEncoding(label: "marker", location: .querystring("marker")), AWSMemberEncoding(label: "maxItems", location: .querystring("maxitems")) ] @@ -3858,6 +3865,9 @@ extension Route53 { /// that are associated with a reusable delegation set, specify the ID of that reusable /// delegation set. public let delegationSetId: String? + /// (Optional) Specifies if the hosted zone is private. + /// + public let hostedZoneType: HostedZoneType? /// If the value of IsTruncated in the previous response was /// true, you have more hosted zones. To get more hosted zones, submit /// another ListHostedZones request. For the value of marker, specify the value of NextMarker @@ -3872,8 +3882,9 @@ extension Route53 { /// will return if you submit another request. public let maxItems: Int? - public init(delegationSetId: String? = nil, marker: String? = nil, maxItems: Int? = nil) { + public init(delegationSetId: String? = nil, hostedZoneType: HostedZoneType? = nil, marker: String? = nil, maxItems: Int? = nil) { self.delegationSetId = delegationSetId + self.hostedZoneType = hostedZoneType self.marker = marker self.maxItems = maxItems } diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api+async.swift b/Sources/Soto/Services/SageMaker/SageMaker_api+async.swift index edffb60e79..3270e66415 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api+async.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api+async.swift @@ -136,7 +136,7 @@ extension SageMaker { return try await self.client.execute(operation: "CreateExperiment", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record. The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account. You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. + /// Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record. The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account. Note that it can take approximately 10-15 minutes to provision an OnlineStore FeatureGroup with the InMemory StorageType. You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. 
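Because an InMemory online store can take roughly 10-15 minutes to provision, callers may want to poll until the feature group is ready. A hedged sketch follows; describeFeatureGroup, DescribeFeatureGroupRequest(featureGroupName:), and the featureGroupStatus value are assumed from the existing SageMaker surface rather than this diff.

    import SotoSageMaker

    // Sketch: create a feature group, then poll DescribeFeatureGroup until it leaves "Creating".
    // `describeFeatureGroup` and `featureGroupStatus` are assumed from the existing SageMaker API.
    func createFeatureGroupAndWait(_ sageMaker: SageMaker, request: SageMaker.CreateFeatureGroupRequest) async throws {
        _ = try await sageMaker.createFeatureGroup(request)
        while true {
            let described = try await sageMaker.describeFeatureGroup(.init(featureGroupName: request.featureGroupName))
            let status = described.featureGroupStatus
            if status != .creating {
                print("FeatureGroup \(request.featureGroupName) status: \(String(describing: status))")
                return
            }
            try await Task.sleep(nanoseconds: 30_000_000_000) // check every 30 seconds
        }
    }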
public func createFeatureGroup(_ input: CreateFeatureGroupRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateFeatureGroupResponse { return try await self.client.execute(operation: "CreateFeatureGroup", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -392,7 +392,7 @@ extension SageMaker { return try await self.client.execute(operation: "DeleteExperiment", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called. Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your OfflineStore are not deleted. + /// Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called. Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your OfflineStore are not deleted. Note that it can take approximately 10-15 minutes to delete an OnlineStore FeatureGroup with the InMemory StorageType. public func deleteFeatureGroup(_ input: DeleteFeatureGroupRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws { return try await self.client.execute(operation: "DeleteFeatureGroup", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index 6a9a6c67cc..e11ef03d5a 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -215,7 +215,7 @@ public struct SageMaker: AWSService { return self.client.execute(operation: "CreateExperiment", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record. The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account. You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. + /// Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record. The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account. Note that it can take approximately 10-15 minutes to provision an OnlineStore FeatureGroup with the InMemory StorageType. You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup. 
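Further down, this patch introduces an AdditionalS3DataSource shape and threads it through ModelPackageContainerDefinition and TrainingSpecification. A construction sketch that sticks to the initializers visible in the diff; the image and S3 URIs are placeholders, and .gzip is an assumed case of the pre-existing CompressionType enum.

    import SotoSageMaker

    // Sketch: attach the new AdditionalS3DataSource to a model package container definition.
    // URIs are placeholders; `.gzip` is assumed to be an existing CompressionType case.
    let extraSource = SageMaker.AdditionalS3DataSource(
        compressionType: .gzip,
        s3DataType: .s3object,               // new enum case added in this patch
        s3Uri: "s3://example-bucket/extras/tokenizer.tar.gz"
    )
    let container = SageMaker.ModelPackageContainerDefinition(
        additionalS3DataSource: extraSource, // new member added in this patch
        image: "123456789012.dkr.ecr.us-east-1.amazonaws.com/example-inference:latest"
    )
    try container.validate(name: "containerDefinition") // enforces the s3Uri length and pattern checks shown in the diff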
public func createFeatureGroup(_ input: CreateFeatureGroupRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "CreateFeatureGroup", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -471,7 +471,7 @@ public struct SageMaker: AWSService { return self.client.execute(operation: "DeleteExperiment", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called. Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your OfflineStore are not deleted. + /// Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called. Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your OfflineStore are not deleted. Note that it can take approximately 10-15 minutes to delete an OnlineStore FeatureGroup with the InMemory StorageType. @discardableResult public func deleteFeatureGroup(_ input: DeleteFeatureGroupRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeleteFeatureGroup", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index 45d9cb30d6..61f2bce1cd 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -36,6 +36,11 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum AdditionalS3DataSourceDataType: String, CustomStringConvertible, Codable, Sendable { + case s3object = "S3Object" + public var description: String { return self.rawValue } + } + public enum AggregationTransformationValue: String, CustomStringConvertible, Codable, Sendable { case avg = "avg" case first = "first" @@ -2752,6 +2757,32 @@ extension SageMaker { } } + public struct AdditionalS3DataSource: AWSEncodableShape & AWSDecodableShape { + /// The type of compression used for an additional data source used in inference or training. Specify None if your additional data source is not compressed. + public let compressionType: CompressionType? + /// The data type of the additional data source that you specify for use in inference or training. + public let s3DataType: AdditionalS3DataSourceDataType + /// The uniform resource identifier (URI) used to identify an additional data source used in inference or training. + public let s3Uri: String + + public init(compressionType: CompressionType? 
= nil, s3DataType: AdditionalS3DataSourceDataType, s3Uri: String) { + self.compressionType = compressionType + self.s3DataType = s3DataType + self.s3Uri = s3Uri + } + + public func validate(name: String) throws { + try self.validate(self.s3Uri, name: "s3Uri", parent: name, max: 1024) + try self.validate(self.s3Uri, name: "s3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + } + + private enum CodingKeys: String, CodingKey { + case compressionType = "CompressionType" + case s3DataType = "S3DataType" + case s3Uri = "S3Uri" + } + } + public struct AgentVersion: AWSDecodableShape { /// The number of Edge Manager agents. public let agentCount: Int64 @@ -2830,7 +2861,7 @@ extension SageMaker { try validate($0, name: "containerEntrypoint[]", parent: name, max: 256) try validate($0, name: "containerEntrypoint[]", parent: name, pattern: ".*") } - try self.validate(self.containerEntrypoint, name: "containerEntrypoint", parent: name, max: 10) + try self.validate(self.containerEntrypoint, name: "containerEntrypoint", parent: name, max: 100) try self.validate(self.containerEntrypoint, name: "containerEntrypoint", parent: name, min: 1) try self.metricDefinitions?.forEach { try $0.validate(name: "\(name).metricDefinitions[]") @@ -4347,17 +4378,23 @@ extension SageMaker { } public struct CanvasAppSettings: AWSEncodableShape & AWSDecodableShape { + /// The model deployment settings for the SageMaker Canvas application. + public let directDeploySettings: DirectDeploySettings? /// The settings for connecting to an external data source with OAuth. public let identityProviderOAuthSettings: [IdentityProviderOAuthSetting]? + /// The settings for document querying. + public let kendraSettings: KendraSettings? /// The model registry settings for the SageMaker Canvas application. public let modelRegisterSettings: ModelRegisterSettings? - /// Time series forecast settings for the Canvas application. + /// Time series forecast settings for the SageMaker Canvas application. public let timeSeriesForecastingSettings: TimeSeriesForecastingSettings? /// The workspace settings for the SageMaker Canvas application. public let workspaceSettings: WorkspaceSettings? - public init(identityProviderOAuthSettings: [IdentityProviderOAuthSetting]? = nil, modelRegisterSettings: ModelRegisterSettings? = nil, timeSeriesForecastingSettings: TimeSeriesForecastingSettings? = nil, workspaceSettings: WorkspaceSettings? = nil) { + public init(directDeploySettings: DirectDeploySettings? = nil, identityProviderOAuthSettings: [IdentityProviderOAuthSetting]? = nil, kendraSettings: KendraSettings? = nil, modelRegisterSettings: ModelRegisterSettings? = nil, timeSeriesForecastingSettings: TimeSeriesForecastingSettings? = nil, workspaceSettings: WorkspaceSettings? 
= nil) { + self.directDeploySettings = directDeploySettings self.identityProviderOAuthSettings = identityProviderOAuthSettings + self.kendraSettings = kendraSettings self.modelRegisterSettings = modelRegisterSettings self.timeSeriesForecastingSettings = timeSeriesForecastingSettings self.workspaceSettings = workspaceSettings @@ -4374,7 +4411,9 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case directDeploySettings = "DirectDeploySettings" case identityProviderOAuthSettings = "IdentityProviderOAuthSettings" + case kendraSettings = "KendraSettings" case modelRegisterSettings = "ModelRegisterSettings" case timeSeriesForecastingSettings = "TimeSeriesForecastingSettings" case workspaceSettings = "WorkspaceSettings" @@ -8580,7 +8619,7 @@ extension SageMaker { try validate($0.value, name: "environment[\"\($0.key)\"]", parent: name, max: 512) try validate($0.value, name: "environment[\"\($0.key)\"]", parent: name, pattern: "^[\\S\\s]*$") } - try self.validate(self.environment, name: "environment", parent: name, max: 48) + try self.validate(self.environment, name: "environment", parent: name, max: 100) try self.experimentConfig?.validate(name: "\(name).experimentConfig") try self.hyperParameters?.forEach { try validate($0.key, name: "hyperParameters.key", parent: name, max: 256) @@ -15657,6 +15696,19 @@ extension SageMaker { } } + public struct DirectDeploySettings: AWSEncodableShape & AWSDecodableShape { + /// Describes whether model deployment permissions are enabled or disabled in the Canvas application. + public let status: FeatureStatus? + + public init(status: FeatureStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "Status" + } + } + public struct DisableSagemakerServicecatalogPortfolioInput: AWSEncodableShape { public init() {} } @@ -19252,6 +19304,19 @@ extension SageMaker { } } + public struct KendraSettings: AWSEncodableShape & AWSDecodableShape { + /// Describes whether the document querying feature is enabled or disabled in the Canvas application. + public let status: FeatureStatus? + + public init(status: FeatureStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "Status" + } + } + public struct KernelGatewayAppSettings: AWSEncodableShape & AWSDecodableShape { /// A list of custom SageMaker images that are configured to run as a KernelGateway app. public let customImages: [CustomImage]? @@ -25822,6 +25887,8 @@ extension SageMaker { } public struct ModelPackageContainerDefinition: AWSEncodableShape & AWSDecodableShape { + /// The additional data source that is used during inference in the Docker container for your model package. + public let additionalS3DataSource: AdditionalS3DataSource? /// The DNS host name for the Docker container. public let containerHostname: String? /// The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map. @@ -25843,7 +25910,8 @@ extension SageMaker { /// The Amazon Web Services Marketplace product ID of the model package. public let productId: String? - public init(containerHostname: String? = nil, environment: [String: String]? = nil, framework: String? = nil, frameworkVersion: String? = nil, image: String, imageDigest: String? = nil, modelDataUrl: String? = nil, modelInput: ModelInput? = nil, nearestModelName: String? = nil, productId: String? 
= nil) { + public init(additionalS3DataSource: AdditionalS3DataSource? = nil, containerHostname: String? = nil, environment: [String: String]? = nil, framework: String? = nil, frameworkVersion: String? = nil, image: String, imageDigest: String? = nil, modelDataUrl: String? = nil, modelInput: ModelInput? = nil, nearestModelName: String? = nil, productId: String? = nil) { + self.additionalS3DataSource = additionalS3DataSource self.containerHostname = containerHostname self.environment = environment self.framework = framework @@ -25857,6 +25925,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.additionalS3DataSource?.validate(name: "\(name).additionalS3DataSource") try self.validate(self.containerHostname, name: "containerHostname", parent: name, max: 63) try self.validate(self.containerHostname, name: "containerHostname", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.environment?.forEach { @@ -25881,6 +25950,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case additionalS3DataSource = "AdditionalS3DataSource" case containerHostname = "ContainerHostname" case environment = "Environment" case framework = "Framework" @@ -32505,6 +32575,8 @@ extension SageMaker { } public struct TrainingSpecification: AWSEncodableShape & AWSDecodableShape { + /// The additional data source used during the training job. + public let additionalS3DataSource: AdditionalS3DataSource? /// A list of MetricDefinition objects, which are used for parsing metrics generated by the algorithm. public let metricDefinitions: [MetricDefinition]? /// A list of the HyperParameterSpecification objects, that define the supported hyperparameters. This is required if the algorithm supports automatic model tuning.> @@ -32522,7 +32594,8 @@ extension SageMaker { /// An MD5 hash of the training algorithm that identifies the Docker image used for training. public let trainingImageDigest: String? - public init(metricDefinitions: [MetricDefinition]? = nil, supportedHyperParameters: [HyperParameterSpecification]? = nil, supportedTrainingInstanceTypes: [TrainingInstanceType], supportedTuningJobObjectiveMetrics: [HyperParameterTuningJobObjective]? = nil, supportsDistributedTraining: Bool? = nil, trainingChannels: [ChannelSpecification], trainingImage: String, trainingImageDigest: String? = nil) { + public init(additionalS3DataSource: AdditionalS3DataSource? = nil, metricDefinitions: [MetricDefinition]? = nil, supportedHyperParameters: [HyperParameterSpecification]? = nil, supportedTrainingInstanceTypes: [TrainingInstanceType], supportedTuningJobObjectiveMetrics: [HyperParameterTuningJobObjective]? = nil, supportsDistributedTraining: Bool? = nil, trainingChannels: [ChannelSpecification], trainingImage: String, trainingImageDigest: String? 
= nil) { + self.additionalS3DataSource = additionalS3DataSource self.metricDefinitions = metricDefinitions self.supportedHyperParameters = supportedHyperParameters self.supportedTrainingInstanceTypes = supportedTrainingInstanceTypes @@ -32534,6 +32607,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.additionalS3DataSource?.validate(name: "\(name).additionalS3DataSource") try self.metricDefinitions?.forEach { try $0.validate(name: "\(name).metricDefinitions[]") } @@ -32557,6 +32631,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case additionalS3DataSource = "AdditionalS3DataSource" case metricDefinitions = "MetricDefinitions" case supportedHyperParameters = "SupportedHyperParameters" case supportedTrainingInstanceTypes = "SupportedTrainingInstanceTypes" diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift index d88ad4efbd..3388c44624 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift @@ -4571,6 +4571,263 @@ extension SecurityHub { } } + public struct AwsDmsEndpointDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) for the SSL certificate that encrypts connections between the DMS endpoint and the replication instance. + public let certificateArn: String? + /// The name of the endpoint database. + public let databaseName: String? + /// The Amazon Resource Name (ARN) of the endpoint. + public let endpointArn: String? + /// The database endpoint identifier. + public let endpointIdentifier: String? + /// The type of endpoint. Valid values are source and target. + public let endpointType: String? + /// The type of engine for the endpoint, depending on the EndpointType value. + public let engineName: String? + /// A value that can be used for cross-account validation. + public let externalId: String? + /// Additional attributes associated with the connection. + public let extraConnectionAttributes: String? + /// An DMS key identifier that is used to encrypt the connection parameters for the endpoint. If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region. + public let kmsKeyId: String? + /// The port used to access the endpoint. + public let port: Int? + /// The name of the server where the endpoint database resides. + public let serverName: String? + /// The SSL mode used to connect to the endpoint. The default is none. + public let sslMode: String? + /// The user name to be used to log in to the endpoint database. + public let username: String? + + public init(certificateArn: String? = nil, databaseName: String? = nil, endpointArn: String? = nil, endpointIdentifier: String? = nil, endpointType: String? = nil, engineName: String? = nil, externalId: String? = nil, extraConnectionAttributes: String? = nil, kmsKeyId: String? = nil, port: Int? = nil, serverName: String? = nil, sslMode: String? = nil, username: String? 
= nil) { + self.certificateArn = certificateArn + self.databaseName = databaseName + self.endpointArn = endpointArn + self.endpointIdentifier = endpointIdentifier + self.endpointType = endpointType + self.engineName = engineName + self.externalId = externalId + self.extraConnectionAttributes = extraConnectionAttributes + self.kmsKeyId = kmsKeyId + self.port = port + self.serverName = serverName + self.sslMode = sslMode + self.username = username + } + + public func validate(name: String) throws { + try self.validate(self.certificateArn, name: "certificateArn", parent: name, pattern: "\\S") + try self.validate(self.databaseName, name: "databaseName", parent: name, pattern: "\\S") + try self.validate(self.endpointArn, name: "endpointArn", parent: name, pattern: "\\S") + try self.validate(self.endpointIdentifier, name: "endpointIdentifier", parent: name, pattern: "\\S") + try self.validate(self.endpointType, name: "endpointType", parent: name, pattern: "\\S") + try self.validate(self.engineName, name: "engineName", parent: name, pattern: "\\S") + try self.validate(self.externalId, name: "externalId", parent: name, pattern: "\\S") + try self.validate(self.extraConnectionAttributes, name: "extraConnectionAttributes", parent: name, pattern: "\\S") + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "\\S") + try self.validate(self.serverName, name: "serverName", parent: name, pattern: "\\S") + try self.validate(self.sslMode, name: "sslMode", parent: name, pattern: "\\S") + try self.validate(self.username, name: "username", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case certificateArn = "CertificateArn" + case databaseName = "DatabaseName" + case endpointArn = "EndpointArn" + case endpointIdentifier = "EndpointIdentifier" + case endpointType = "EndpointType" + case engineName = "EngineName" + case externalId = "ExternalId" + case extraConnectionAttributes = "ExtraConnectionAttributes" + case kmsKeyId = "KmsKeyId" + case port = "Port" + case serverName = "ServerName" + case sslMode = "SslMode" + case username = "Username" + } + } + + public struct AwsDmsReplicationInstanceDetails: AWSEncodableShape & AWSDecodableShape { + /// The amount of storage (in gigabytes) that is allocated for the replication instance. + public let allocatedStorage: Int? + /// Indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. + public let autoMinorVersionUpgrade: Bool? + /// The Availability Zone that the replication instance is created in. The default value is a random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region, such as us-east-1d. + public let availabilityZone: String? + /// The engine version number of the replication instance. If an engine version number is not specified when a replication instance is created, the default is the latest engine version available. + public let engineVersion: String? + /// An KMS key identifier that is used to encrypt the data on the replication instance. If you don't specify a value for the KmsKeyId parameter, DMS uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region. + public let kmsKeyId: String? + /// Specifies whether the replication instance is deployed across multiple Availability Zones (AZs). 
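A construction sketch for the new AwsDmsEndpointDetails shape defined above; every value is a placeholder, and validate(name:) simply applies the non-whitespace patterns listed in the diff.

    import SotoSecurityHub

    // Sketch: populate the new AwsDmsEndpointDetails shape with placeholder values.
    let dmsEndpoint = SecurityHub.AwsDmsEndpointDetails(
        endpointArn: "arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE",
        endpointIdentifier: "example-source-endpoint",
        endpointType: "source",
        engineName: "postgres",
        port: 5432,
        serverName: "db.example.internal",
        sslMode: "require",
        username: "dms_user"
    )
    try dmsEndpoint.validate(name: "awsDmsEndpoint") // applies the \S patterns from the diff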
You can't set the AvailabilityZone parameter if the MultiAZ parameter is set to true. + public let multiAZ: Bool? + /// The maintenance window times for the replication instance. Upgrades to the replication instance are performed during this time. + public let preferredMaintenanceWindow: String? + /// Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true. + public let publiclyAccessible: Bool? + /// The compute and memory capacity of the replication instance as defined for the specified replication instance class. + public let replicationInstanceClass: String? + /// The replication instance identifier. + public let replicationInstanceIdentifier: String? + /// The subnet group for the replication instance. + public let replicationSubnetGroup: AwsDmsReplicationInstanceReplicationSubnetGroupDetails? + /// The virtual private cloud (VPC) security group for the replication instance. + public let vpcSecurityGroups: [AwsDmsReplicationInstanceVpcSecurityGroupsDetails]? + + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, replicationSubnetGroup: AwsDmsReplicationInstanceReplicationSubnetGroupDetails? = nil, vpcSecurityGroups: [AwsDmsReplicationInstanceVpcSecurityGroupsDetails]? = nil) { + self.allocatedStorage = allocatedStorage + self.autoMinorVersionUpgrade = autoMinorVersionUpgrade + self.availabilityZone = availabilityZone + self.engineVersion = engineVersion + self.kmsKeyId = kmsKeyId + self.multiAZ = multiAZ + self.preferredMaintenanceWindow = preferredMaintenanceWindow + self.publiclyAccessible = publiclyAccessible + self.replicationInstanceClass = replicationInstanceClass + self.replicationInstanceIdentifier = replicationInstanceIdentifier + self.replicationSubnetGroup = replicationSubnetGroup + self.vpcSecurityGroups = vpcSecurityGroups + } + + public func validate(name: String) throws { + try self.validate(self.availabilityZone, name: "availabilityZone", parent: name, pattern: "\\S") + try self.validate(self.engineVersion, name: "engineVersion", parent: name, pattern: "\\S") + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "\\S") + try self.validate(self.preferredMaintenanceWindow, name: "preferredMaintenanceWindow", parent: name, pattern: "\\S") + try self.validate(self.replicationInstanceClass, name: "replicationInstanceClass", parent: name, pattern: "\\S") + try self.validate(self.replicationInstanceIdentifier, name: "replicationInstanceIdentifier", parent: name, pattern: "\\S") + try self.replicationSubnetGroup?.validate(name: "\(name).replicationSubnetGroup") + try self.vpcSecurityGroups?.forEach { + try $0.validate(name: "\(name).vpcSecurityGroups[]") + } + } + + private enum CodingKeys: String, CodingKey { + case allocatedStorage = "AllocatedStorage" + case autoMinorVersionUpgrade = "AutoMinorVersionUpgrade" + case availabilityZone = "AvailabilityZone" + case engineVersion = "EngineVersion" + case kmsKeyId = "KmsKeyId" + case multiAZ = "MultiAZ" + case preferredMaintenanceWindow = "PreferredMaintenanceWindow" + case publiclyAccessible = "PubliclyAccessible" + case replicationInstanceClass = 
"ReplicationInstanceClass" + case replicationInstanceIdentifier = "ReplicationInstanceIdentifier" + case replicationSubnetGroup = "ReplicationSubnetGroup" + case vpcSecurityGroups = "VpcSecurityGroups" + } + } + + public struct AwsDmsReplicationInstanceReplicationSubnetGroupDetails: AWSEncodableShape & AWSDecodableShape { + /// The identifier of the replication subnet group. + public let replicationSubnetGroupIdentifier: String? + + public init(replicationSubnetGroupIdentifier: String? = nil) { + self.replicationSubnetGroupIdentifier = replicationSubnetGroupIdentifier + } + + public func validate(name: String) throws { + try self.validate(self.replicationSubnetGroupIdentifier, name: "replicationSubnetGroupIdentifier", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case replicationSubnetGroupIdentifier = "ReplicationSubnetGroupIdentifier" + } + } + + public struct AwsDmsReplicationInstanceVpcSecurityGroupsDetails: AWSEncodableShape & AWSDecodableShape { + /// The identifier of the VPC security group that’s associated with the replication instance. + public let vpcSecurityGroupId: String? + + public init(vpcSecurityGroupId: String? = nil) { + self.vpcSecurityGroupId = vpcSecurityGroupId + } + + public func validate(name: String) throws { + try self.validate(self.vpcSecurityGroupId, name: "vpcSecurityGroupId", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case vpcSecurityGroupId = "VpcSecurityGroupId" + } + } + + public struct AwsDmsReplicationTaskDetails: AWSEncodableShape & AWSDecodableShape { + /// Indicates when you want a change data capture (CDC) operation to start. CdcStartPosition or CdcStartTime specifies when you want a CDC operation to start. Only a value for one of these fields is included. + public let cdcStartPosition: String? + /// Indicates the start time for a CDC operation. CdcStartPosition or CdcStartTime specifies when you want a CDC operation to start. Only a value for one of these fields is included. + public let cdcStartTime: String? + /// Indicates when you want a CDC operation to stop. The value can be either server time or commit time. + public let cdcStopPosition: String? + /// The identifier of the replication task. + public let id: String? + /// The migration type. + public let migrationType: String? + /// The Amazon Resource Name (ARN) of a replication instance. + public let replicationInstanceArn: String? + /// The user-defined replication task identifier or name. + public let replicationTaskIdentifier: String? + /// The settings for the replication task. + public let replicationTaskSettings: String? + /// A display name for the resource identifier at the end of the EndpointArn response parameter. If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for the end of EndpointArn. + public let resourceIdentifier: String? + /// The ARN of the source endpoint. + public let sourceEndpointArn: String? + /// The table mappings for the replication task, in JSON format. + public let tableMappings: String? + /// The ARN of the target endpoint. + public let targetEndpointArn: String? + /// Supplemental information that the task requires to migrate the data for certain source and target endpoints. + public let taskData: String? + + public init(cdcStartPosition: String? = nil, cdcStartTime: String? = nil, cdcStopPosition: String? = nil, id: String? = nil, migrationType: String? = nil, replicationInstanceArn: String? = nil, replicationTaskIdentifier: String?
= nil, replicationTaskSettings: String? = nil, resourceIdentifier: String? = nil, sourceEndpointArn: String? = nil, tableMappings: String? = nil, targetEndpointArn: String? = nil, taskData: String? = nil) { + self.cdcStartPosition = cdcStartPosition + self.cdcStartTime = cdcStartTime + self.cdcStopPosition = cdcStopPosition + self.id = id + self.migrationType = migrationType + self.replicationInstanceArn = replicationInstanceArn + self.replicationTaskIdentifier = replicationTaskIdentifier + self.replicationTaskSettings = replicationTaskSettings + self.resourceIdentifier = resourceIdentifier + self.sourceEndpointArn = sourceEndpointArn + self.tableMappings = tableMappings + self.targetEndpointArn = targetEndpointArn + self.taskData = taskData + } + + public func validate(name: String) throws { + try self.validate(self.cdcStartPosition, name: "cdcStartPosition", parent: name, pattern: "\\S") + try self.validate(self.cdcStartTime, name: "cdcStartTime", parent: name, pattern: "\\S") + try self.validate(self.cdcStopPosition, name: "cdcStopPosition", parent: name, pattern: "\\S") + try self.validate(self.id, name: "id", parent: name, pattern: "\\S") + try self.validate(self.migrationType, name: "migrationType", parent: name, pattern: "\\S") + try self.validate(self.replicationInstanceArn, name: "replicationInstanceArn", parent: name, pattern: "\\S") + try self.validate(self.replicationTaskIdentifier, name: "replicationTaskIdentifier", parent: name, pattern: "\\S") + try self.validate(self.replicationTaskSettings, name: "replicationTaskSettings", parent: name, pattern: "\\S") + try self.validate(self.resourceIdentifier, name: "resourceIdentifier", parent: name, pattern: "\\S") + try self.validate(self.sourceEndpointArn, name: "sourceEndpointArn", parent: name, pattern: "\\S") + try self.validate(self.tableMappings, name: "tableMappings", parent: name, pattern: "\\S") + try self.validate(self.targetEndpointArn, name: "targetEndpointArn", parent: name, pattern: "\\S") + try self.validate(self.taskData, name: "taskData", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case cdcStartPosition = "CdcStartPosition" + case cdcStartTime = "CdcStartTime" + case cdcStopPosition = "CdcStopPosition" + case id = "Id" + case migrationType = "MigrationType" + case replicationInstanceArn = "ReplicationInstanceArn" + case replicationTaskIdentifier = "ReplicationTaskIdentifier" + case replicationTaskSettings = "ReplicationTaskSettings" + case resourceIdentifier = "ResourceIdentifier" + case sourceEndpointArn = "SourceEndpointArn" + case tableMappings = "TableMappings" + case targetEndpointArn = "TargetEndpointArn" + case taskData = "TaskData" + } + } + public struct AwsDynamoDbTableAttributeDefinition: AWSEncodableShape & AWSDecodableShape { /// The name of the attribute. public let attributeName: String? @@ -9109,12 +9366,14 @@ extension SecurityHub { public let proxyConfiguration: AwsEcsTaskDefinitionProxyConfigurationDetails? /// The task launch types that the task definition was validated against. public let requiresCompatibilities: [String]? + /// The status of the task definition. + public let status: String? /// The short name or ARN of the IAM role that grants containers in the task permission to call Amazon Web Services API operations on your behalf. public let taskRoleArn: String? /// The data volume definitions for the task. public let volumes: [AwsEcsTaskDefinitionVolumesDetails]? - public init(containerDefinitions: [AwsEcsTaskDefinitionContainerDefinitionsDetails]? 
= nil, cpu: String? = nil, executionRoleArn: String? = nil, family: String? = nil, inferenceAccelerators: [AwsEcsTaskDefinitionInferenceAcceleratorsDetails]? = nil, ipcMode: String? = nil, memory: String? = nil, networkMode: String? = nil, pidMode: String? = nil, placementConstraints: [AwsEcsTaskDefinitionPlacementConstraintsDetails]? = nil, proxyConfiguration: AwsEcsTaskDefinitionProxyConfigurationDetails? = nil, requiresCompatibilities: [String]? = nil, taskRoleArn: String? = nil, volumes: [AwsEcsTaskDefinitionVolumesDetails]? = nil) { + public init(containerDefinitions: [AwsEcsTaskDefinitionContainerDefinitionsDetails]? = nil, cpu: String? = nil, executionRoleArn: String? = nil, family: String? = nil, inferenceAccelerators: [AwsEcsTaskDefinitionInferenceAcceleratorsDetails]? = nil, ipcMode: String? = nil, memory: String? = nil, networkMode: String? = nil, pidMode: String? = nil, placementConstraints: [AwsEcsTaskDefinitionPlacementConstraintsDetails]? = nil, proxyConfiguration: AwsEcsTaskDefinitionProxyConfigurationDetails? = nil, requiresCompatibilities: [String]? = nil, status: String? = nil, taskRoleArn: String? = nil, volumes: [AwsEcsTaskDefinitionVolumesDetails]? = nil) { self.containerDefinitions = containerDefinitions self.cpu = cpu self.executionRoleArn = executionRoleArn @@ -9127,6 +9386,7 @@ extension SecurityHub { self.placementConstraints = placementConstraints self.proxyConfiguration = proxyConfiguration self.requiresCompatibilities = requiresCompatibilities + self.status = status self.taskRoleArn = taskRoleArn self.volumes = volumes } @@ -9152,6 +9412,7 @@ extension SecurityHub { try self.requiresCompatibilities?.forEach { try validate($0, name: "requiresCompatibilities[]", parent: name, pattern: "\\S") } + try self.validate(self.status, name: "status", parent: name, pattern: "\\S") try self.validate(self.taskRoleArn, name: "taskRoleArn", parent: name, pattern: "\\S") try self.volumes?.forEach { try $0.validate(name: "\(name).volumes[]") @@ -9171,6 +9432,7 @@ extension SecurityHub { case placementConstraints = "PlacementConstraints" case proxyConfiguration = "ProxyConfiguration" case requiresCompatibilities = "RequiresCompatibilities" + case status = "Status" case taskRoleArn = "TaskRoleArn" case volumes = "Volumes" } @@ -10867,6 +11129,209 @@ extension SecurityHub { } } + public struct AwsEventsEndpointDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the endpoint. + public let arn: String? + /// A description of the endpoint. + public let description: String? + /// The URL subdomain of the endpoint. For example, if EndpointUrl is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo. + public let endpointId: String? + /// The URL of the endpoint. + public let endpointUrl: String? + /// The event buses being used by the endpoint. + public let eventBuses: [AwsEventsEndpointEventBusesDetails]? + /// The name of the endpoint. + public let name: String? + /// Whether event replication was enabled or disabled for this endpoint. The default state is ENABLED, which means you must supply a RoleArn. If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED. + public let replicationConfig: AwsEventsEndpointReplicationConfigDetails? + /// The ARN of the role used by event replication for the endpoint. + public let roleArn: String? + /// The routing configuration of the endpoint. + public let routingConfig: AwsEventsEndpointRoutingConfigDetails? 
+ /// The current state of the endpoint. + public let state: String? + /// The reason the endpoint is in its current state. + public let stateReason: String? + + public init(arn: String? = nil, description: String? = nil, endpointId: String? = nil, endpointUrl: String? = nil, eventBuses: [AwsEventsEndpointEventBusesDetails]? = nil, name: String? = nil, replicationConfig: AwsEventsEndpointReplicationConfigDetails? = nil, roleArn: String? = nil, routingConfig: AwsEventsEndpointRoutingConfigDetails? = nil, state: String? = nil, stateReason: String? = nil) { + self.arn = arn + self.description = description + self.endpointId = endpointId + self.endpointUrl = endpointUrl + self.eventBuses = eventBuses + self.name = name + self.replicationConfig = replicationConfig + self.roleArn = roleArn + self.routingConfig = routingConfig + self.state = state + self.stateReason = stateReason + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "\\S") + try self.validate(self.description, name: "description", parent: name, pattern: "\\S") + try self.validate(self.endpointId, name: "endpointId", parent: name, pattern: "\\S") + try self.validate(self.endpointUrl, name: "endpointUrl", parent: name, pattern: "\\S") + try self.eventBuses?.forEach { + try $0.validate(name: "\(name).eventBuses[]") + } + try self.validate(self.name, name: "name", parent: name, pattern: "\\S") + try self.replicationConfig?.validate(name: "\(name).replicationConfig") + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "\\S") + try self.routingConfig?.validate(name: "\(name).routingConfig") + try self.validate(self.state, name: "state", parent: name, pattern: "\\S") + try self.validate(self.stateReason, name: "stateReason", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case description = "Description" + case endpointId = "EndpointId" + case endpointUrl = "EndpointUrl" + case eventBuses = "EventBuses" + case name = "Name" + case replicationConfig = "ReplicationConfig" + case roleArn = "RoleArn" + case routingConfig = "RoutingConfig" + case state = "State" + case stateReason = "StateReason" + } + } + + public struct AwsEventsEndpointEventBusesDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the event bus that the endpoint is associated with. + public let eventBusArn: String? + + public init(eventBusArn: String? = nil) { + self.eventBusArn = eventBusArn + } + + public func validate(name: String) throws { + try self.validate(self.eventBusArn, name: "eventBusArn", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case eventBusArn = "EventBusArn" + } + } + + public struct AwsEventsEndpointReplicationConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// The state of event replication. + public let state: String? + + public init(state: String? = nil) { + self.state = state + } + + public func validate(name: String) throws { + try self.validate(self.state, name: "state", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case state = "State" + } + } + + public struct AwsEventsEndpointRoutingConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered. + public let failoverConfig: AwsEventsEndpointRoutingConfigFailoverConfigDetails? 
+ + public init(failoverConfig: AwsEventsEndpointRoutingConfigFailoverConfigDetails? = nil) { + self.failoverConfig = failoverConfig + } + + public func validate(name: String) throws { + try self.failoverConfig?.validate(name: "\(name).failoverConfig") + } + + private enum CodingKeys: String, CodingKey { + case failoverConfig = "FailoverConfig" + } + } + + public struct AwsEventsEndpointRoutingConfigFailoverConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// The main Region of the endpoint. + public let primary: AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails? + /// The Region that events are routed to when failover is triggered or event replication is enabled. + public let secondary: AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails? + + public init(primary: AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails? = nil, secondary: AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails? = nil) { + self.primary = primary + self.secondary = secondary + } + + public func validate(name: String) throws { + try self.primary?.validate(name: "\(name).primary") + try self.secondary?.validate(name: "\(name).secondary") + } + + private enum CodingKeys: String, CodingKey { + case primary = "Primary" + case secondary = "Secondary" + } + } + + public struct AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the health check used by the endpoint to determine whether failover is triggered. + public let healthCheck: String? + + public init(healthCheck: String? = nil) { + self.healthCheck = healthCheck + } + + public func validate(name: String) throws { + try self.validate(self.healthCheck, name: "healthCheck", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case healthCheck = "HealthCheck" + } + } + + public struct AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails: AWSEncodableShape & AWSDecodableShape { + /// Defines the secondary Region. + public let route: String? + + public init(route: String? = nil) { + self.route = route + } + + public func validate(name: String) throws { + try self.validate(self.route, name: "route", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case route = "Route" + } + } + + public struct AwsEventsEventbusDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the account permitted to write events to the current account. + public let arn: String? + /// The name of the event bus. + public let name: String? + /// The policy that enables the external account to send events to your account. + public let policy: String? + + public init(arn: String? = nil, name: String? = nil, policy: String? = nil) { + self.arn = arn + self.name = name + self.policy = policy + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "\\S") + try self.validate(self.name, name: "name", parent: name, pattern: "\\S") + try self.validate(self.policy, name: "policy", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case name = "Name" + case policy = "Policy" + } + } + public struct AwsGuardDutyDetectorDataSourcesCloudTrailDetails: AWSEncodableShape & AWSDecodableShape { /// Specifies whether CloudTrail is activated as a data source for the detector. public let status: String? 
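The EventBridge global endpoint shapes above can be wired together as follows; the health-check ARN, event bus ARN, Region, and state are placeholder values, and only initializers that appear in this patch are used.

    import SotoSecurityHub

    // Sketch: build the new EventBridge endpoint routing/failover shapes with placeholder values.
    let routing = SecurityHub.AwsEventsEndpointRoutingConfigDetails(
        failoverConfig: SecurityHub.AwsEventsEndpointRoutingConfigFailoverConfigDetails(
            primary: SecurityHub.AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails(healthCheck: "arn:aws:route53:::healthcheck/example"),
            secondary: SecurityHub.AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails(route: "us-west-2")
        )
    )
    let endpoint = SecurityHub.AwsEventsEndpointDetails(
        endpointId: "abcde.veo",
        eventBuses: [SecurityHub.AwsEventsEndpointEventBusesDetails(eventBusArn: "arn:aws:events:us-east-1:123456789012:event-bus/default")],
        name: "example-endpoint",
        routingConfig: routing,
        state: "ACTIVE"
    )
    try endpoint.validate(name: "awsEventsEndpoint")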
@@ -12169,6 +12634,227 @@ extension SecurityHub { } } + public struct AwsMskClusterClusterInfoClientAuthenticationDetails: AWSEncodableShape & AWSDecodableShape { + /// Provides details for client authentication using SASL. + public let sasl: AwsMskClusterClusterInfoClientAuthenticationSaslDetails? + /// Provides details for client authentication using TLS. + public let tls: AwsMskClusterClusterInfoClientAuthenticationTlsDetails? + /// Provides details for allowing no client authentication. + public let unauthenticated: AwsMskClusterClusterInfoClientAuthenticationUnauthenticatedDetails? + + public init(sasl: AwsMskClusterClusterInfoClientAuthenticationSaslDetails? = nil, tls: AwsMskClusterClusterInfoClientAuthenticationTlsDetails? = nil, unauthenticated: AwsMskClusterClusterInfoClientAuthenticationUnauthenticatedDetails? = nil) { + self.sasl = sasl + self.tls = tls + self.unauthenticated = unauthenticated + } + + public func validate(name: String) throws { + try self.tls?.validate(name: "\(name).tls") + } + + private enum CodingKeys: String, CodingKey { + case sasl = "Sasl" + case tls = "Tls" + case unauthenticated = "Unauthenticated" + } + } + + public struct AwsMskClusterClusterInfoClientAuthenticationSaslDetails: AWSEncodableShape & AWSDecodableShape { + /// Provides details for SASL client authentication using IAM. + public let iam: AwsMskClusterClusterInfoClientAuthenticationSaslIamDetails? + /// Details for SASL client authentication using SCRAM. + public let scram: AwsMskClusterClusterInfoClientAuthenticationSaslScramDetails? + + public init(iam: AwsMskClusterClusterInfoClientAuthenticationSaslIamDetails? = nil, scram: AwsMskClusterClusterInfoClientAuthenticationSaslScramDetails? = nil) { + self.iam = iam + self.scram = scram + } + + private enum CodingKeys: String, CodingKey { + case iam = "Iam" + case scram = "Scram" + } + } + + public struct AwsMskClusterClusterInfoClientAuthenticationSaslIamDetails: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether SASL/IAM authentication is enabled or not. + public let enabled: Bool? + + public init(enabled: Bool? = nil) { + self.enabled = enabled + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + } + } + + public struct AwsMskClusterClusterInfoClientAuthenticationSaslScramDetails: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether SASL/SCRAM authentication is enabled or not. + public let enabled: Bool? + + public init(enabled: Bool? = nil) { + self.enabled = enabled + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + } + } + + public struct AwsMskClusterClusterInfoClientAuthenticationTlsDetails: AWSEncodableShape & AWSDecodableShape { + /// List of Amazon Web Services Private CA Amazon Resource Names (ARNs). Amazon Web Services Private CA enables creation of + /// private certificate authority (CA) hierarchies, including root and subordinate CAs, without the investment and maintenance costs + /// of operating an on-premises CA. + public let certificateAuthorityArnList: [String]? + /// Indicates whether TLS authentication is enabled or not. + public let enabled: Bool? + + public init(certificateAuthorityArnList: [String]? = nil, enabled: Bool? 
= nil) { + self.certificateAuthorityArnList = certificateAuthorityArnList + self.enabled = enabled + } + + public func validate(name: String) throws { + try self.certificateAuthorityArnList?.forEach { + try validate($0, name: "certificateAuthorityArnList[]", parent: name, pattern: "\\S") + } + } + + private enum CodingKeys: String, CodingKey { + case certificateAuthorityArnList = "CertificateAuthorityArnList" + case enabled = "Enabled" + } + } + + public struct AwsMskClusterClusterInfoClientAuthenticationUnauthenticatedDetails: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether unauthenticated is allowed or not. + public let enabled: Bool? + + public init(enabled: Bool? = nil) { + self.enabled = enabled + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + } + } + + public struct AwsMskClusterClusterInfoDetails: AWSEncodableShape & AWSDecodableShape { + /// Provides information for different modes of client authentication. + public let clientAuthentication: AwsMskClusterClusterInfoClientAuthenticationDetails? + /// The name of the cluster. + public let clusterName: String? + /// The current version of the MSK cluster. + public let currentVersion: String? + /// Includes encryption-related information, such as the KMS key used for encrypting data at rest and + /// whether you want Amazon MSK to encrypt your data in transit. + public let encryptionInfo: AwsMskClusterClusterInfoEncryptionInfoDetails? + /// The number of broker nodes in the cluster. + public let numberOfBrokerNodes: Int? + + public init(clientAuthentication: AwsMskClusterClusterInfoClientAuthenticationDetails? = nil, clusterName: String? = nil, currentVersion: String? = nil, encryptionInfo: AwsMskClusterClusterInfoEncryptionInfoDetails? = nil, numberOfBrokerNodes: Int? = nil) { + self.clientAuthentication = clientAuthentication + self.clusterName = clusterName + self.currentVersion = currentVersion + self.encryptionInfo = encryptionInfo + self.numberOfBrokerNodes = numberOfBrokerNodes + } + + public func validate(name: String) throws { + try self.clientAuthentication?.validate(name: "\(name).clientAuthentication") + try self.validate(self.clusterName, name: "clusterName", parent: name, pattern: "\\S") + try self.validate(self.currentVersion, name: "currentVersion", parent: name, pattern: "\\S") + try self.encryptionInfo?.validate(name: "\(name).encryptionInfo") + } + + private enum CodingKeys: String, CodingKey { + case clientAuthentication = "ClientAuthentication" + case clusterName = "ClusterName" + case currentVersion = "CurrentVersion" + case encryptionInfo = "EncryptionInfo" + case numberOfBrokerNodes = "NumberOfBrokerNodes" + } + } + + public struct AwsMskClusterClusterInfoEncryptionInfoDetails: AWSEncodableShape & AWSDecodableShape { + /// The data-volume encryption details. You can't update encryption at rest settings for existing clusters. + public let encryptionAtRest: AwsMskClusterClusterInfoEncryptionInfoEncryptionAtRestDetails? + /// The settings for encrypting data in transit. + public let encryptionInTransit: AwsMskClusterClusterInfoEncryptionInfoEncryptionInTransitDetails? + + public init(encryptionAtRest: AwsMskClusterClusterInfoEncryptionInfoEncryptionAtRestDetails? = nil, encryptionInTransit: AwsMskClusterClusterInfoEncryptionInfoEncryptionInTransitDetails? 
= nil) { + self.encryptionAtRest = encryptionAtRest + self.encryptionInTransit = encryptionInTransit + } + + public func validate(name: String) throws { + try self.encryptionAtRest?.validate(name: "\(name).encryptionAtRest") + try self.encryptionInTransit?.validate(name: "\(name).encryptionInTransit") + } + + private enum CodingKeys: String, CodingKey { + case encryptionAtRest = "EncryptionAtRest" + case encryptionInTransit = "EncryptionInTransit" + } + } + + public struct AwsMskClusterClusterInfoEncryptionInfoEncryptionAtRestDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the KMS key for encrypting data at rest. If you don't specify a + /// KMS key, MSK creates one for you and uses it. + public let dataVolumeKMSKeyId: String? + + public init(dataVolumeKMSKeyId: String? = nil) { + self.dataVolumeKMSKeyId = dataVolumeKMSKeyId + } + + public func validate(name: String) throws { + try self.validate(self.dataVolumeKMSKeyId, name: "dataVolumeKMSKeyId", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case dataVolumeKMSKeyId = "DataVolumeKMSKeyId" + } + } + + public struct AwsMskClusterClusterInfoEncryptionInfoEncryptionInTransitDetails: AWSEncodableShape & AWSDecodableShape { + /// Indicates the encryption setting for data in transit between clients and brokers. + public let clientBroker: String? + /// When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted. When set to false, the communication happens in plain text. The default value is true. + public let inCluster: Bool? + + public init(clientBroker: String? = nil, inCluster: Bool? = nil) { + self.clientBroker = clientBroker + self.inCluster = inCluster + } + + public func validate(name: String) throws { + try self.validate(self.clientBroker, name: "clientBroker", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case clientBroker = "ClientBroker" + case inCluster = "InCluster" + } + } + + public struct AwsMskClusterDetails: AWSEncodableShape & AWSDecodableShape { + /// Provides information about a cluster. + public let clusterInfo: AwsMskClusterClusterInfoDetails? + + public init(clusterInfo: AwsMskClusterClusterInfoDetails? = nil) { + self.clusterInfo = clusterInfo + } + + public func validate(name: String) throws { + try self.clusterInfo?.validate(name: "\(name).clusterInfo") + } + + private enum CodingKeys: String, CodingKey { + case clusterInfo = "ClusterInfo" + } + } + public struct AwsNetworkFirewallFirewallDetails: AWSEncodableShape & AWSDecodableShape { /// Whether the firewall is protected from deletion. If set to true, then the firewall cannot be deleted. public let deleteProtection: Bool? @@ -12760,6 +13446,8 @@ extension SecurityHub { public let allocatedStorage: Int? /// A list of the IAM roles that are associated with the DB cluster. public let associatedRoles: [AwsRdsDbClusterAssociatedRole]? + /// Indicates if minor version upgrades are automatically applied to the cluster. + public let autoMinorVersionUpgrade: Bool? /// A list of Availability Zones (AZs) where instances in the DB cluster can be created. public let availabilityZones: [String]? /// The number of days for which automated backups are retained. @@ -12829,10 +13517,11 @@ extension SecurityHub { /// A list of VPC security groups that the DB cluster belongs to. public let vpcSecurityGroups: [AwsRdsDbInstanceVpcSecurityGroup]? - public init(activityStreamStatus: String? = nil, allocatedStorage: Int? 
= nil, associatedRoles: [AwsRdsDbClusterAssociatedRole]? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, clusterCreateTime: String? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterMembers: [AwsRdsDbClusterMember]? = nil, dbClusterOptionGroupMemberships: [AwsRdsDbClusterOptionGroupMembership]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [AwsRdsDbDomainMembership]? = nil, enabledCloudWatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, kmsKeyId: String? = nil, masterUsername: String? = nil, multiAz: Bool? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, status: String? = nil, storageEncrypted: Bool? = nil, vpcSecurityGroups: [AwsRdsDbInstanceVpcSecurityGroup]? = nil) { + public init(activityStreamStatus: String? = nil, allocatedStorage: Int? = nil, associatedRoles: [AwsRdsDbClusterAssociatedRole]? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, clusterCreateTime: String? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterMembers: [AwsRdsDbClusterMember]? = nil, dbClusterOptionGroupMemberships: [AwsRdsDbClusterOptionGroupMembership]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [AwsRdsDbDomainMembership]? = nil, enabledCloudWatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, kmsKeyId: String? = nil, masterUsername: String? = nil, multiAz: Bool? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, status: String? = nil, storageEncrypted: Bool? = nil, vpcSecurityGroups: [AwsRdsDbInstanceVpcSecurityGroup]? 
= nil) { self.activityStreamStatus = activityStreamStatus self.allocatedStorage = allocatedStorage self.associatedRoles = associatedRoles + self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZones = availabilityZones self.backupRetentionPeriod = backupRetentionPeriod self.clusterCreateTime = clusterCreateTime @@ -12921,6 +13610,7 @@ extension SecurityHub { case activityStreamStatus = "ActivityStreamStatus" case allocatedStorage = "AllocatedStorage" case associatedRoles = "AssociatedRoles" + case autoMinorVersionUpgrade = "AutoMinorVersionUpgrade" case availabilityZones = "AvailabilityZones" case backupRetentionPeriod = "BackupRetentionPeriod" case clusterCreateTime = "ClusterCreateTime" @@ -14834,6 +15524,127 @@ extension SecurityHub { } } + public struct AwsRoute53HostedZoneConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// Any comments that you include about the hosted zone. + public let comment: String? + + public init(comment: String? = nil) { + self.comment = comment + } + + public func validate(name: String) throws { + try self.validate(self.comment, name: "comment", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case comment = "Comment" + } + } + + public struct AwsRoute53HostedZoneDetails: AWSEncodableShape & AWSDecodableShape { + /// An object that contains information about the specified hosted zone. + public let hostedZone: AwsRoute53HostedZoneObjectDetails? + /// An object that contains a list of the authoritative name servers for a hosted zone or for a reusable delegation set. + public let nameServers: [String]? + /// An array that contains one QueryLoggingConfig element for each DNS query logging configuration that is + /// associated with the current Amazon Web Services account. + public let queryLoggingConfig: AwsRoute53QueryLoggingConfigDetails? + /// An object that contains information about the Amazon Virtual Private Clouds (Amazon VPCs) that are associated with + /// the specified hosted zone. + public let vpcs: [AwsRoute53HostedZoneVpcDetails]? + + public init(hostedZone: AwsRoute53HostedZoneObjectDetails? = nil, nameServers: [String]? = nil, queryLoggingConfig: AwsRoute53QueryLoggingConfigDetails? = nil, vpcs: [AwsRoute53HostedZoneVpcDetails]? = nil) { + self.hostedZone = hostedZone + self.nameServers = nameServers + self.queryLoggingConfig = queryLoggingConfig + self.vpcs = vpcs + } + + public func validate(name: String) throws { + try self.hostedZone?.validate(name: "\(name).hostedZone") + try self.nameServers?.forEach { + try validate($0, name: "nameServers[]", parent: name, pattern: "\\S") + } + try self.queryLoggingConfig?.validate(name: "\(name).queryLoggingConfig") + try self.vpcs?.forEach { + try $0.validate(name: "\(name).vpcs[]") + } + } + + private enum CodingKeys: String, CodingKey { + case hostedZone = "HostedZone" + case nameServers = "NameServers" + case queryLoggingConfig = "QueryLoggingConfig" + case vpcs = "Vpcs" + } + } + + public struct AwsRoute53HostedZoneObjectDetails: AWSEncodableShape & AWSDecodableShape { + /// An object that includes the Comment element. + public let config: AwsRoute53HostedZoneConfigDetails? + /// The ID that Route 53 assigns to the hosted zone when you create it. + public let id: String? + /// The name of the domain. For public hosted zones, this is the name that you have registered with your DNS registrar. + public let name: String? + + public init(config: AwsRoute53HostedZoneConfigDetails? = nil, id: String? = nil, name: String? 
= nil) { + self.config = config + self.id = id + self.name = name + } + + public func validate(name: String) throws { + try self.config?.validate(name: "\(name).config") + try self.validate(self.id, name: "id", parent: name, pattern: "\\S") + try self.validate(self.name, name: "name", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case config = "Config" + case id = "Id" + case name = "Name" + } + } + + public struct AwsRoute53HostedZoneVpcDetails: AWSEncodableShape & AWSDecodableShape { + /// The identifier of an Amazon VPC. + public let id: String? + /// The Amazon Web Services Region that an Amazon VPC was created in. + public let region: String? + + public init(id: String? = nil, region: String? = nil) { + self.id = id + self.region = region + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, pattern: "\\S") + try self.validate(self.region, name: "region", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case id = "Id" + case region = "Region" + } + } + + public struct AwsRoute53QueryLoggingConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log group that Route 53 is publishing logs to. + public let cloudWatchLogsLogGroupArn: CloudWatchLogsLogGroupArnConfigDetails? + + public init(cloudWatchLogsLogGroupArn: CloudWatchLogsLogGroupArnConfigDetails? = nil) { + self.cloudWatchLogsLogGroupArn = cloudWatchLogsLogGroupArn + } + + public func validate(name: String) throws { + try self.cloudWatchLogsLogGroupArn?.validate(name: "\(name).cloudWatchLogsLogGroupArn") + } + + private enum CodingKeys: String, CodingKey { + case cloudWatchLogsLogGroupArn = "CloudWatchLogsLogGroupArn" + } + } + public struct AwsS3AccountPublicAccessBlockDetails: AWSEncodableShape & AWSDecodableShape { /// Indicates whether to reject calls to update an S3 bucket if the calls include a public access control list (ACL). public let blockPublicAcls: Bool? @@ -18869,6 +19680,33 @@ extension SecurityHub { } } + public struct CloudWatchLogsLogGroupArnConfigDetails: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the CloudWatch Logs log group that Route 53 is publishing logs to. + public let cloudWatchLogsLogGroupArn: String? + /// The ID of the hosted zone that CloudWatch Logs is logging queries for. + public let hostedZoneId: String? + /// The ID for a DNS query logging configuration. + public let id: String? + + public init(cloudWatchLogsLogGroupArn: String? = nil, hostedZoneId: String? = nil, id: String? = nil) { + self.cloudWatchLogsLogGroupArn = cloudWatchLogsLogGroupArn + self.hostedZoneId = hostedZoneId + self.id = id + } + + public func validate(name: String) throws { + try self.validate(self.cloudWatchLogsLogGroupArn, name: "cloudWatchLogsLogGroupArn", parent: name, pattern: "\\S") + try self.validate(self.hostedZoneId, name: "hostedZoneId", parent: name, pattern: "\\S") + try self.validate(self.id, name: "id", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case cloudWatchLogsLogGroupArn = "CloudWatchLogsLogGroupArn" + case hostedZoneId = "HostedZoneId" + case id = "Id" + } + } + public struct CodeVulnerabilitiesFilePath: AWSEncodableShape & AWSDecodableShape { /// The line number of the last line of code in which the vulnerability is located. public let endLine: Int? @@ -22251,6 +23089,15 @@ extension SecurityHub { public let awsCloudWatchAlarm: AwsCloudWatchAlarmDetails? 
/// Details for an CodeBuild project. public let awsCodeBuildProject: AwsCodeBuildProjectDetails? + /// Provides details about an Database Migration Service (DMS) endpoint. An endpoint provides connection, data + /// store type, and location information about your data store. + public let awsDmsEndpoint: AwsDmsEndpointDetails? + /// Provides details about an DMS replication instance. DMS uses a replication instance to connect to your + /// source data store, read the source data, and format the data for consumption by the target data store. + public let awsDmsReplicationInstance: AwsDmsReplicationInstanceDetails? + /// Provides details about an DMS replication task. A replication task moves a set of data from the source + /// endpoint to the target endpoint. + public let awsDmsReplicationTask: AwsDmsReplicationTaskDetails? /// Details about a DynamoDB table. public let awsDynamoDbTable: AwsDynamoDbTableDetails? /// Details about an Elastic IP address. @@ -22309,6 +23156,12 @@ extension SecurityHub { public let awsElbv2LoadBalancer: AwsElbv2LoadBalancerDetails? /// A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for schemas. They collect and organize schemas so that your schemas are in logical groups. public let awsEventSchemasRegistry: AwsEventSchemasRegistryDetails? + /// Provides details about an Amazon EventBridge global endpoint. The endpoint can improve your application’s + /// availability by making it Regional-fault tolerant. + public let awsEventsEndpoint: AwsEventsEndpointDetails? + /// Provides details about Amazon EventBridge event bus for an endpoint. An event bus is a router that receives events + /// and delivers them to zero or more destinations, or targets. + public let awsEventsEventbus: AwsEventsEventbusDetails? /// Provides details about an Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector is required for GuardDuty to become operational. public let awsGuardDutyDetector: AwsGuardDutyDetectorDetails? /// Details about an IAM access key related to a finding. @@ -22329,6 +23182,8 @@ extension SecurityHub { public let awsLambdaFunction: AwsLambdaFunctionDetails? /// Details for a Lambda layer version. public let awsLambdaLayerVersion: AwsLambdaLayerVersionDetails? + /// Provides details about an Amazon Managed Streaming for Apache Kafka (Amazon MSK) cluster. + public let awsMskCluster: AwsMskClusterDetails? /// Details about an Network Firewall firewall. public let awsNetworkFirewallFirewall: AwsNetworkFirewallFirewallDetails? /// Details about an Network Firewall firewall policy. @@ -22351,6 +23206,9 @@ extension SecurityHub { public let awsRdsEventSubscription: AwsRdsEventSubscriptionDetails? /// Contains details about an Amazon Redshift cluster. public let awsRedshiftCluster: AwsRedshiftClusterDetails? + /// Provides details about an Amazon Route 53 hosted zone, including the four name servers assigned to the hosted + /// zone. A hosted zone represents a collection of records that can be managed together, belonging to a single parent domain name. + public let awsRoute53HostedZone: AwsRoute53HostedZoneDetails? /// Details about the Amazon S3 Public Access Block configuration for an account. public let awsS3AccountPublicAccessBlock: AwsS3AccountPublicAccessBlockDetails? /// Details about an S3 bucket related to a finding. @@ -22393,7 +23251,7 @@ extension SecurityHub { /// Details about a resource that are not available in a type-specific details object. 
Use the Other object in the following cases. The type-specific object does not contain all of the fields that you want to populate. In this case, first use the type-specific object to populate those fields. Use the Other object to populate the fields that are missing from the type-specific object. The resource type does not have a corresponding object. This includes resources for which the type is Other. public let other: [String: String]? - public init(awsAmazonMqBroker: AwsAmazonMqBrokerDetails? = nil, awsApiGatewayRestApi: AwsApiGatewayRestApiDetails? = nil, awsApiGatewayStage: AwsApiGatewayStageDetails? = nil, awsApiGatewayV2Api: AwsApiGatewayV2ApiDetails? = nil, awsApiGatewayV2Stage: AwsApiGatewayV2StageDetails? = nil, awsAppSyncGraphQlApi: AwsAppSyncGraphQlApiDetails? = nil, awsAthenaWorkGroup: AwsAthenaWorkGroupDetails? = nil, awsAutoScalingAutoScalingGroup: AwsAutoScalingAutoScalingGroupDetails? = nil, awsAutoScalingLaunchConfiguration: AwsAutoScalingLaunchConfigurationDetails? = nil, awsBackupBackupPlan: AwsBackupBackupPlanDetails? = nil, awsBackupBackupVault: AwsBackupBackupVaultDetails? = nil, awsBackupRecoveryPoint: AwsBackupRecoveryPointDetails? = nil, awsCertificateManagerCertificate: AwsCertificateManagerCertificateDetails? = nil, awsCloudFormationStack: AwsCloudFormationStackDetails? = nil, awsCloudFrontDistribution: AwsCloudFrontDistributionDetails? = nil, awsCloudTrailTrail: AwsCloudTrailTrailDetails? = nil, awsCloudWatchAlarm: AwsCloudWatchAlarmDetails? = nil, awsCodeBuildProject: AwsCodeBuildProjectDetails? = nil, awsDynamoDbTable: AwsDynamoDbTableDetails? = nil, awsEc2Eip: AwsEc2EipDetails? = nil, awsEc2Instance: AwsEc2InstanceDetails? = nil, awsEc2LaunchTemplate: AwsEc2LaunchTemplateDetails? = nil, awsEc2NetworkAcl: AwsEc2NetworkAclDetails? = nil, awsEc2NetworkInterface: AwsEc2NetworkInterfaceDetails? = nil, awsEc2RouteTable: AwsEc2RouteTableDetails? = nil, awsEc2SecurityGroup: AwsEc2SecurityGroupDetails? = nil, awsEc2Subnet: AwsEc2SubnetDetails? = nil, awsEc2TransitGateway: AwsEc2TransitGatewayDetails? = nil, awsEc2Volume: AwsEc2VolumeDetails? = nil, awsEc2Vpc: AwsEc2VpcDetails? = nil, awsEc2VpcEndpointService: AwsEc2VpcEndpointServiceDetails? = nil, awsEc2VpcPeeringConnection: AwsEc2VpcPeeringConnectionDetails? = nil, awsEc2VpnConnection: AwsEc2VpnConnectionDetails? = nil, awsEcrContainerImage: AwsEcrContainerImageDetails? = nil, awsEcrRepository: AwsEcrRepositoryDetails? = nil, awsEcsCluster: AwsEcsClusterDetails? = nil, awsEcsContainer: AwsEcsContainerDetails? = nil, awsEcsService: AwsEcsServiceDetails? = nil, awsEcsTask: AwsEcsTaskDetails? = nil, awsEcsTaskDefinition: AwsEcsTaskDefinitionDetails? = nil, awsEfsAccessPoint: AwsEfsAccessPointDetails? = nil, awsEksCluster: AwsEksClusterDetails? = nil, awsElasticBeanstalkEnvironment: AwsElasticBeanstalkEnvironmentDetails? = nil, awsElasticsearchDomain: AwsElasticsearchDomainDetails? = nil, awsElbLoadBalancer: AwsElbLoadBalancerDetails? = nil, awsElbv2LoadBalancer: AwsElbv2LoadBalancerDetails? = nil, awsEventSchemasRegistry: AwsEventSchemasRegistryDetails? = nil, awsGuardDutyDetector: AwsGuardDutyDetectorDetails? = nil, awsIamAccessKey: AwsIamAccessKeyDetails? = nil, awsIamGroup: AwsIamGroupDetails? = nil, awsIamPolicy: AwsIamPolicyDetails? = nil, awsIamRole: AwsIamRoleDetails? = nil, awsIamUser: AwsIamUserDetails? = nil, awsKinesisStream: AwsKinesisStreamDetails? = nil, awsKmsKey: AwsKmsKeyDetails? = nil, awsLambdaFunction: AwsLambdaFunctionDetails? = nil, awsLambdaLayerVersion: AwsLambdaLayerVersionDetails? 
= nil, awsNetworkFirewallFirewall: AwsNetworkFirewallFirewallDetails? = nil, awsNetworkFirewallFirewallPolicy: AwsNetworkFirewallFirewallPolicyDetails? = nil, awsNetworkFirewallRuleGroup: AwsNetworkFirewallRuleGroupDetails? = nil, awsOpenSearchServiceDomain: AwsOpenSearchServiceDomainDetails? = nil, awsRdsDbCluster: AwsRdsDbClusterDetails? = nil, awsRdsDbClusterSnapshot: AwsRdsDbClusterSnapshotDetails? = nil, awsRdsDbInstance: AwsRdsDbInstanceDetails? = nil, awsRdsDbSecurityGroup: AwsRdsDbSecurityGroupDetails? = nil, awsRdsDbSnapshot: AwsRdsDbSnapshotDetails? = nil, awsRdsEventSubscription: AwsRdsEventSubscriptionDetails? = nil, awsRedshiftCluster: AwsRedshiftClusterDetails? = nil, awsS3AccountPublicAccessBlock: AwsS3AccountPublicAccessBlockDetails? = nil, awsS3Bucket: AwsS3BucketDetails? = nil, awsS3Object: AwsS3ObjectDetails? = nil, awsSageMakerNotebookInstance: AwsSageMakerNotebookInstanceDetails? = nil, awsSecretsManagerSecret: AwsSecretsManagerSecretDetails? = nil, awsSnsTopic: AwsSnsTopicDetails? = nil, awsSqsQueue: AwsSqsQueueDetails? = nil, awsSsmPatchCompliance: AwsSsmPatchComplianceDetails? = nil, awsStepFunctionStateMachine: AwsStepFunctionStateMachineDetails? = nil, awsWafRateBasedRule: AwsWafRateBasedRuleDetails? = nil, awsWafRegionalRateBasedRule: AwsWafRegionalRateBasedRuleDetails? = nil, awsWafRegionalRule: AwsWafRegionalRuleDetails? = nil, awsWafRegionalRuleGroup: AwsWafRegionalRuleGroupDetails? = nil, awsWafRegionalWebAcl: AwsWafRegionalWebAclDetails? = nil, awsWafRule: AwsWafRuleDetails? = nil, awsWafRuleGroup: AwsWafRuleGroupDetails? = nil, awsWafv2RuleGroup: AwsWafv2RuleGroupDetails? = nil, awsWafv2WebAcl: AwsWafv2WebAclDetails? = nil, awsWafWebAcl: AwsWafWebAclDetails? = nil, awsXrayEncryptionConfig: AwsXrayEncryptionConfigDetails? = nil, container: ContainerDetails? = nil, other: [String: String]? = nil) { + public init(awsAmazonMqBroker: AwsAmazonMqBrokerDetails? = nil, awsApiGatewayRestApi: AwsApiGatewayRestApiDetails? = nil, awsApiGatewayStage: AwsApiGatewayStageDetails? = nil, awsApiGatewayV2Api: AwsApiGatewayV2ApiDetails? = nil, awsApiGatewayV2Stage: AwsApiGatewayV2StageDetails? = nil, awsAppSyncGraphQlApi: AwsAppSyncGraphQlApiDetails? = nil, awsAthenaWorkGroup: AwsAthenaWorkGroupDetails? = nil, awsAutoScalingAutoScalingGroup: AwsAutoScalingAutoScalingGroupDetails? = nil, awsAutoScalingLaunchConfiguration: AwsAutoScalingLaunchConfigurationDetails? = nil, awsBackupBackupPlan: AwsBackupBackupPlanDetails? = nil, awsBackupBackupVault: AwsBackupBackupVaultDetails? = nil, awsBackupRecoveryPoint: AwsBackupRecoveryPointDetails? = nil, awsCertificateManagerCertificate: AwsCertificateManagerCertificateDetails? = nil, awsCloudFormationStack: AwsCloudFormationStackDetails? = nil, awsCloudFrontDistribution: AwsCloudFrontDistributionDetails? = nil, awsCloudTrailTrail: AwsCloudTrailTrailDetails? = nil, awsCloudWatchAlarm: AwsCloudWatchAlarmDetails? = nil, awsCodeBuildProject: AwsCodeBuildProjectDetails? = nil, awsDmsEndpoint: AwsDmsEndpointDetails? = nil, awsDmsReplicationInstance: AwsDmsReplicationInstanceDetails? = nil, awsDmsReplicationTask: AwsDmsReplicationTaskDetails? = nil, awsDynamoDbTable: AwsDynamoDbTableDetails? = nil, awsEc2Eip: AwsEc2EipDetails? = nil, awsEc2Instance: AwsEc2InstanceDetails? = nil, awsEc2LaunchTemplate: AwsEc2LaunchTemplateDetails? = nil, awsEc2NetworkAcl: AwsEc2NetworkAclDetails? = nil, awsEc2NetworkInterface: AwsEc2NetworkInterfaceDetails? = nil, awsEc2RouteTable: AwsEc2RouteTableDetails? = nil, awsEc2SecurityGroup: AwsEc2SecurityGroupDetails? 
= nil, awsEc2Subnet: AwsEc2SubnetDetails? = nil, awsEc2TransitGateway: AwsEc2TransitGatewayDetails? = nil, awsEc2Volume: AwsEc2VolumeDetails? = nil, awsEc2Vpc: AwsEc2VpcDetails? = nil, awsEc2VpcEndpointService: AwsEc2VpcEndpointServiceDetails? = nil, awsEc2VpcPeeringConnection: AwsEc2VpcPeeringConnectionDetails? = nil, awsEc2VpnConnection: AwsEc2VpnConnectionDetails? = nil, awsEcrContainerImage: AwsEcrContainerImageDetails? = nil, awsEcrRepository: AwsEcrRepositoryDetails? = nil, awsEcsCluster: AwsEcsClusterDetails? = nil, awsEcsContainer: AwsEcsContainerDetails? = nil, awsEcsService: AwsEcsServiceDetails? = nil, awsEcsTask: AwsEcsTaskDetails? = nil, awsEcsTaskDefinition: AwsEcsTaskDefinitionDetails? = nil, awsEfsAccessPoint: AwsEfsAccessPointDetails? = nil, awsEksCluster: AwsEksClusterDetails? = nil, awsElasticBeanstalkEnvironment: AwsElasticBeanstalkEnvironmentDetails? = nil, awsElasticsearchDomain: AwsElasticsearchDomainDetails? = nil, awsElbLoadBalancer: AwsElbLoadBalancerDetails? = nil, awsElbv2LoadBalancer: AwsElbv2LoadBalancerDetails? = nil, awsEventSchemasRegistry: AwsEventSchemasRegistryDetails? = nil, awsEventsEndpoint: AwsEventsEndpointDetails? = nil, awsEventsEventbus: AwsEventsEventbusDetails? = nil, awsGuardDutyDetector: AwsGuardDutyDetectorDetails? = nil, awsIamAccessKey: AwsIamAccessKeyDetails? = nil, awsIamGroup: AwsIamGroupDetails? = nil, awsIamPolicy: AwsIamPolicyDetails? = nil, awsIamRole: AwsIamRoleDetails? = nil, awsIamUser: AwsIamUserDetails? = nil, awsKinesisStream: AwsKinesisStreamDetails? = nil, awsKmsKey: AwsKmsKeyDetails? = nil, awsLambdaFunction: AwsLambdaFunctionDetails? = nil, awsLambdaLayerVersion: AwsLambdaLayerVersionDetails? = nil, awsMskCluster: AwsMskClusterDetails? = nil, awsNetworkFirewallFirewall: AwsNetworkFirewallFirewallDetails? = nil, awsNetworkFirewallFirewallPolicy: AwsNetworkFirewallFirewallPolicyDetails? = nil, awsNetworkFirewallRuleGroup: AwsNetworkFirewallRuleGroupDetails? = nil, awsOpenSearchServiceDomain: AwsOpenSearchServiceDomainDetails? = nil, awsRdsDbCluster: AwsRdsDbClusterDetails? = nil, awsRdsDbClusterSnapshot: AwsRdsDbClusterSnapshotDetails? = nil, awsRdsDbInstance: AwsRdsDbInstanceDetails? = nil, awsRdsDbSecurityGroup: AwsRdsDbSecurityGroupDetails? = nil, awsRdsDbSnapshot: AwsRdsDbSnapshotDetails? = nil, awsRdsEventSubscription: AwsRdsEventSubscriptionDetails? = nil, awsRedshiftCluster: AwsRedshiftClusterDetails? = nil, awsRoute53HostedZone: AwsRoute53HostedZoneDetails? = nil, awsS3AccountPublicAccessBlock: AwsS3AccountPublicAccessBlockDetails? = nil, awsS3Bucket: AwsS3BucketDetails? = nil, awsS3Object: AwsS3ObjectDetails? = nil, awsSageMakerNotebookInstance: AwsSageMakerNotebookInstanceDetails? = nil, awsSecretsManagerSecret: AwsSecretsManagerSecretDetails? = nil, awsSnsTopic: AwsSnsTopicDetails? = nil, awsSqsQueue: AwsSqsQueueDetails? = nil, awsSsmPatchCompliance: AwsSsmPatchComplianceDetails? = nil, awsStepFunctionStateMachine: AwsStepFunctionStateMachineDetails? = nil, awsWafRateBasedRule: AwsWafRateBasedRuleDetails? = nil, awsWafRegionalRateBasedRule: AwsWafRegionalRateBasedRuleDetails? = nil, awsWafRegionalRule: AwsWafRegionalRuleDetails? = nil, awsWafRegionalRuleGroup: AwsWafRegionalRuleGroupDetails? = nil, awsWafRegionalWebAcl: AwsWafRegionalWebAclDetails? = nil, awsWafRule: AwsWafRuleDetails? = nil, awsWafRuleGroup: AwsWafRuleGroupDetails? = nil, awsWafv2RuleGroup: AwsWafv2RuleGroupDetails? = nil, awsWafv2WebAcl: AwsWafv2WebAclDetails? = nil, awsWafWebAcl: AwsWafWebAclDetails? 
= nil, awsXrayEncryptionConfig: AwsXrayEncryptionConfigDetails? = nil, container: ContainerDetails? = nil, other: [String: String]? = nil) { self.awsAmazonMqBroker = awsAmazonMqBroker self.awsApiGatewayRestApi = awsApiGatewayRestApi self.awsApiGatewayStage = awsApiGatewayStage @@ -22412,6 +23270,9 @@ extension SecurityHub { self.awsCloudTrailTrail = awsCloudTrailTrail self.awsCloudWatchAlarm = awsCloudWatchAlarm self.awsCodeBuildProject = awsCodeBuildProject + self.awsDmsEndpoint = awsDmsEndpoint + self.awsDmsReplicationInstance = awsDmsReplicationInstance + self.awsDmsReplicationTask = awsDmsReplicationTask self.awsDynamoDbTable = awsDynamoDbTable self.awsEc2Eip = awsEc2Eip self.awsEc2Instance = awsEc2Instance @@ -22441,6 +23302,8 @@ extension SecurityHub { self.awsElbLoadBalancer = awsElbLoadBalancer self.awsElbv2LoadBalancer = awsElbv2LoadBalancer self.awsEventSchemasRegistry = awsEventSchemasRegistry + self.awsEventsEndpoint = awsEventsEndpoint + self.awsEventsEventbus = awsEventsEventbus self.awsGuardDutyDetector = awsGuardDutyDetector self.awsIamAccessKey = awsIamAccessKey self.awsIamGroup = awsIamGroup @@ -22451,6 +23314,7 @@ extension SecurityHub { self.awsKmsKey = awsKmsKey self.awsLambdaFunction = awsLambdaFunction self.awsLambdaLayerVersion = awsLambdaLayerVersion + self.awsMskCluster = awsMskCluster self.awsNetworkFirewallFirewall = awsNetworkFirewallFirewall self.awsNetworkFirewallFirewallPolicy = awsNetworkFirewallFirewallPolicy self.awsNetworkFirewallRuleGroup = awsNetworkFirewallRuleGroup @@ -22462,6 +23326,7 @@ extension SecurityHub { self.awsRdsDbSnapshot = awsRdsDbSnapshot self.awsRdsEventSubscription = awsRdsEventSubscription self.awsRedshiftCluster = awsRedshiftCluster + self.awsRoute53HostedZone = awsRoute53HostedZone self.awsS3AccountPublicAccessBlock = awsS3AccountPublicAccessBlock self.awsS3Bucket = awsS3Bucket self.awsS3Object = awsS3Object @@ -22505,6 +23370,9 @@ extension SecurityHub { try self.awsCloudTrailTrail?.validate(name: "\(name).awsCloudTrailTrail") try self.awsCloudWatchAlarm?.validate(name: "\(name).awsCloudWatchAlarm") try self.awsCodeBuildProject?.validate(name: "\(name).awsCodeBuildProject") + try self.awsDmsEndpoint?.validate(name: "\(name).awsDmsEndpoint") + try self.awsDmsReplicationInstance?.validate(name: "\(name).awsDmsReplicationInstance") + try self.awsDmsReplicationTask?.validate(name: "\(name).awsDmsReplicationTask") try self.awsDynamoDbTable?.validate(name: "\(name).awsDynamoDbTable") try self.awsEc2Eip?.validate(name: "\(name).awsEc2Eip") try self.awsEc2Instance?.validate(name: "\(name).awsEc2Instance") @@ -22534,6 +23402,8 @@ extension SecurityHub { try self.awsElbLoadBalancer?.validate(name: "\(name).awsElbLoadBalancer") try self.awsElbv2LoadBalancer?.validate(name: "\(name).awsElbv2LoadBalancer") try self.awsEventSchemasRegistry?.validate(name: "\(name).awsEventSchemasRegistry") + try self.awsEventsEndpoint?.validate(name: "\(name).awsEventsEndpoint") + try self.awsEventsEventbus?.validate(name: "\(name).awsEventsEventbus") try self.awsGuardDutyDetector?.validate(name: "\(name).awsGuardDutyDetector") try self.awsIamAccessKey?.validate(name: "\(name).awsIamAccessKey") try self.awsIamGroup?.validate(name: "\(name).awsIamGroup") @@ -22544,6 +23414,7 @@ extension SecurityHub { try self.awsKmsKey?.validate(name: "\(name).awsKmsKey") try self.awsLambdaFunction?.validate(name: "\(name).awsLambdaFunction") try self.awsLambdaLayerVersion?.validate(name: "\(name).awsLambdaLayerVersion") + try self.awsMskCluster?.validate(name: 
"\(name).awsMskCluster") try self.awsNetworkFirewallFirewall?.validate(name: "\(name).awsNetworkFirewallFirewall") try self.awsNetworkFirewallFirewallPolicy?.validate(name: "\(name).awsNetworkFirewallFirewallPolicy") try self.awsNetworkFirewallRuleGroup?.validate(name: "\(name).awsNetworkFirewallRuleGroup") @@ -22555,6 +23426,7 @@ extension SecurityHub { try self.awsRdsDbSnapshot?.validate(name: "\(name).awsRdsDbSnapshot") try self.awsRdsEventSubscription?.validate(name: "\(name).awsRdsEventSubscription") try self.awsRedshiftCluster?.validate(name: "\(name).awsRedshiftCluster") + try self.awsRoute53HostedZone?.validate(name: "\(name).awsRoute53HostedZone") try self.awsS3Bucket?.validate(name: "\(name).awsS3Bucket") try self.awsS3Object?.validate(name: "\(name).awsS3Object") try self.awsSageMakerNotebookInstance?.validate(name: "\(name).awsSageMakerNotebookInstance") @@ -22600,6 +23472,9 @@ extension SecurityHub { case awsCloudTrailTrail = "AwsCloudTrailTrail" case awsCloudWatchAlarm = "AwsCloudWatchAlarm" case awsCodeBuildProject = "AwsCodeBuildProject" + case awsDmsEndpoint = "AwsDmsEndpoint" + case awsDmsReplicationInstance = "AwsDmsReplicationInstance" + case awsDmsReplicationTask = "AwsDmsReplicationTask" case awsDynamoDbTable = "AwsDynamoDbTable" case awsEc2Eip = "AwsEc2Eip" case awsEc2Instance = "AwsEc2Instance" @@ -22629,6 +23504,8 @@ extension SecurityHub { case awsElbLoadBalancer = "AwsElbLoadBalancer" case awsElbv2LoadBalancer = "AwsElbv2LoadBalancer" case awsEventSchemasRegistry = "AwsEventSchemasRegistry" + case awsEventsEndpoint = "AwsEventsEndpoint" + case awsEventsEventbus = "AwsEventsEventbus" case awsGuardDutyDetector = "AwsGuardDutyDetector" case awsIamAccessKey = "AwsIamAccessKey" case awsIamGroup = "AwsIamGroup" @@ -22639,6 +23516,7 @@ extension SecurityHub { case awsKmsKey = "AwsKmsKey" case awsLambdaFunction = "AwsLambdaFunction" case awsLambdaLayerVersion = "AwsLambdaLayerVersion" + case awsMskCluster = "AwsMskCluster" case awsNetworkFirewallFirewall = "AwsNetworkFirewallFirewall" case awsNetworkFirewallFirewallPolicy = "AwsNetworkFirewallFirewallPolicy" case awsNetworkFirewallRuleGroup = "AwsNetworkFirewallRuleGroup" @@ -22650,6 +23528,7 @@ extension SecurityHub { case awsRdsDbSnapshot = "AwsRdsDbSnapshot" case awsRdsEventSubscription = "AwsRdsEventSubscription" case awsRedshiftCluster = "AwsRedshiftCluster" + case awsRoute53HostedZone = "AwsRoute53HostedZone" case awsS3AccountPublicAccessBlock = "AwsS3AccountPublicAccessBlock" case awsS3Bucket = "AwsS3Bucket" case awsS3Object = "AwsS3Object" diff --git a/Sources/Soto/Services/SimSpaceWeaver/SimSpaceWeaver_api.swift b/Sources/Soto/Services/SimSpaceWeaver/SimSpaceWeaver_api.swift index f56284b11c..95056f12a2 100644 --- a/Sources/Soto/Services/SimSpaceWeaver/SimSpaceWeaver_api.swift +++ b/Sources/Soto/Services/SimSpaceWeaver/SimSpaceWeaver_api.swift @@ -54,6 +54,10 @@ public struct SimSpaceWeaver: AWSService { serviceProtocol: .restjson, apiVersion: "2022-10-28", endpoint: endpoint, + serviceEndpoints: [ + "us-gov-east-1": "simspaceweaver.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "simspaceweaver.us-gov-west-1.amazonaws.com" + ], errorType: SimSpaceWeaverErrorType.self, timeout: timeout, byteBufferAllocator: byteBufferAllocator, diff --git a/Sources/Soto/Services/StorageGateway/StorageGateway_api+async.swift b/Sources/Soto/Services/StorageGateway/StorageGateway_api+async.swift index e62b819e78..571e6734d0 100644 --- a/Sources/Soto/Services/StorageGateway/StorageGateway_api+async.swift +++ 
b/Sources/Soto/Services/StorageGateway/StorageGateway_api+async.swift @@ -201,7 +201,7 @@ extension StorageGateway { return try await self.client.execute(operation: "DescribeFileSystemAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request. + /// Returns metadata about a gateway such as its name, network interfaces, time zone, status, and software version. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request. public func describeGatewayInformation(_ input: DescribeGatewayInformationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeGatewayInformationOutput { return try await self.client.execute(operation: "DescribeGatewayInformation", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -281,7 +281,7 @@ extension StorageGateway { return try await self.client.execute(operation: "DisassociateFileSystem", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol. + /// Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol. Joining a domain creates an Active Directory computer account in the default organizational unit, using the gateway's Gateway ID as the account name (for example, SGW-1234ADE). If your Active Directory environment requires that you pre-stage accounts to facilitate the join domain process, you will need to create this account ahead of time. To create the gateway's computer account in an organizational unit other than the default, you must specify the organizational unit when joining the domain. public func joinDomain(_ input: JoinDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> JoinDomainOutput { return try await self.client.execute(operation: "JoinDomain", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -291,7 +291,7 @@ extension StorageGateway { return try await self.client.execute(operation: "ListAutomaticTapeCreationPolicies", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling user account. This operation is only supported for S3 File Gateways. + /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling Amazon Web Services account. This operation is only supported for S3 File Gateways. public func listFileShares(_ input: ListFileSharesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> ListFileSharesOutput { return try await self.client.execute(operation: "ListFileShares", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -341,12 +341,12 @@ extension StorageGateway { return try await self.client.execute(operation: "ListVolumes", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to S3. Amazon S3. Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways. For more information, see Getting file upload notification in the Storage Gateway User Guide. + /// Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to S3. Amazon S3. Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways. For more information, see Getting file upload notification in the Amazon S3 File Gateway User Guide. public func notifyWhenUploaded(_ input: NotifyWhenUploadedInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> NotifyWhenUploadedOutput { return try await self.client.execute(operation: "NotifyWhenUploaded", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway User Guide. This operation is Only supported for S3 File Gateways. When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. 
You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes. Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway User Guide. Wait at least 60 seconds between consecutive RefreshCache API requests. RefreshCache does not evict cache entries if invoked consecutively within 60 seconds of a previous RefreshCache request. If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server. The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter. For more information, see Getting notified about file operations in the Storage Gateway User Guide. + /// Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway User Guide. This operation is Only supported for S3 File Gateways. When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes. Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway User Guide. Wait at least 60 seconds between consecutive RefreshCache API requests. If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server. The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter. For more information, see Getting notified about file operations in the Storage Gateway User Guide. public func refreshCache(_ input: RefreshCacheInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) async throws -> RefreshCacheOutput { return try await self.client.execute(operation: "RefreshCache", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -406,7 +406,7 @@ extension StorageGateway { return try await self.client.execute(operation: "UpdateBandwidthRateLimit", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported only for volume, tape and S3 file gateways. FSx file gateways do not support bandwidth rate limits. + /// Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported for volume, tape, and S3 file gateways. S3 file gateways support bandwidth rate limits for upload only. FSx file gateways do not support bandwidth rate limits. public func updateBandwidthRateLimitSchedule(_ input: UpdateBandwidthRateLimitScheduleInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateBandwidthRateLimitScheduleOutput { return try await self.client.execute(operation: "UpdateBandwidthRateLimitSchedule", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -564,7 +564,7 @@ extension StorageGateway { ) } - /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling user account. This operation is only supported for S3 File Gateways. + /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling Amazon Web Services account. This operation is only supported for S3 File Gateways. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift b/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift index a275e0a333..d607386144 100644 --- a/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift +++ b/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift @@ -256,7 +256,7 @@ public struct StorageGateway: AWSService { return self.client.execute(operation: "DescribeFileSystemAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request. + /// Returns metadata about a gateway such as its name, network interfaces, time zone, status, and software version. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request. public func describeGatewayInformation(_ input: DescribeGatewayInformationInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeGatewayInformation", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -336,7 +336,7 @@ public struct StorageGateway: AWSService { return self.client.execute(operation: "DisassociateFileSystem", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol. + /// Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol. Joining a domain creates an Active Directory computer account in the default organizational unit, using the gateway's Gateway ID as the account name (for example, SGW-1234ADE). If your Active Directory environment requires that you pre-stage accounts to facilitate the join domain process, you will need to create this account ahead of time. To create the gateway's computer account in an organizational unit other than the default, you must specify the organizational unit when joining the domain. public func joinDomain(_ input: JoinDomainInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "JoinDomain", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -346,7 +346,7 @@ public struct StorageGateway: AWSService { return self.client.execute(operation: "ListAutomaticTapeCreationPolicies", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling user account. This operation is only supported for S3 File Gateways. + /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling Amazon Web Services account. This operation is only supported for S3 File Gateways. public func listFileShares(_ input: ListFileSharesInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ListFileShares", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -396,12 +396,12 @@ public struct StorageGateway: AWSService { return self.client.execute(operation: "ListVolumes", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to S3. Amazon S3. Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways. For more information, see Getting file upload notification in the Storage Gateway User Guide. 
+ /// Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to S3. Amazon S3. Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways. For more information, see Getting file upload notification in the Amazon S3 File Gateway User Guide. public func notifyWhenUploaded(_ input: NotifyWhenUploadedInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "NotifyWhenUploaded", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway User Guide. This operation is Only supported for S3 File Gateways. When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes. Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway User Guide. Wait at least 60 seconds between consecutive RefreshCache API requests. RefreshCache does not evict cache entries if invoked consecutively within 60 seconds of a previous RefreshCache request. If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server. The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter. For more information, see Getting notified about file operations in the Storage Gateway User Guide. + /// Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. 
This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway User Guide. This operation is Only supported for S3 File Gateways. When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes. Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway User Guide. Wait at least 60 seconds between consecutive RefreshCache API requests. If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server. The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter. For more information, see Getting notified about file operations in the Storage Gateway User Guide. public func refreshCache(_ input: RefreshCacheInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "RefreshCache", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -461,7 +461,7 @@ public struct StorageGateway: AWSService { return self.client.execute(operation: "UpdateBandwidthRateLimit", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported only for volume, tape and S3 file gateways. FSx file gateways do not support bandwidth rate limits. + /// Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported for volume, tape, and S3 file gateways. S3 file gateways support bandwidth rate limits for upload only. FSx file gateways do not support bandwidth rate limits. public func updateBandwidthRateLimitSchedule(_ input: UpdateBandwidthRateLimitScheduleInput, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { return self.client.execute(operation: "UpdateBandwidthRateLimitSchedule", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -751,7 +751,7 @@ extension StorageGateway { ) } - /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling user account. This operation is only supported for S3 File Gateways. + /// Gets a list of the file shares for a specific S3 File Gateway, or the list of file shares that belong to the calling Amazon Web Services account. This operation is only supported for S3 File Gateways. /// /// Provide paginated results to closure `onPage` for it to combine them into one result. /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. diff --git a/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift b/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift index 250364a2f3..ea77e6e47a 100644 --- a/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift +++ b/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift @@ -354,7 +354,7 @@ extension StorageGateway { try self.validate(self.poolId, name: "poolId", parent: name, min: 1) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -570,7 +570,7 @@ extension StorageGateway { public struct BandwidthRateLimitInterval: AWSEncodableShape & AWSDecodableShape { /// The average download rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the download rate limit is not set. public let averageDownloadRateLimitInBitsPerSec: Int64? - /// The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set. + /// The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set. For Tape Gateway and Volume Gateway, the minimum value is 51200. For S3 File Gateway and FSx File Gateway, the minimum value is 104857600. public let averageUploadRateLimitInBitsPerSec: Int64? /// The days of the week component of the bandwidth rate limit interval, represented as ordinal numbers from 0 to 6, where 0 represents Sunday and 6 represents Saturday. 
public let daysOfWeek: [Int] @@ -711,7 +711,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -748,7 +748,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -836,7 +836,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.networkInterfaceId, name: "networkInterfaceId", parent: name, pattern: "^\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z$") try self.validate(self.snapshotId, name: "snapshotId", parent: name, pattern: "^\\Asnap-([0-9A-Fa-f]{8}|[0-9A-Fa-f]{17})\\z$") try self.validate(self.sourceVolumeARN, name: "sourceVolumeARN", parent: name, max: 500) @@ -967,7 +967,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.locationARN, name: "locationARN", parent: name, max: 1400) try self.validate(self.locationARN, name: "locationARN", parent: name, min: 16) try self.nfsFileShareDefaults?.validate(name: "\(name).nfsFileShareDefaults") @@ -976,7 +976,7 @@ extension StorageGateway { try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, pattern: "^\\{[\\w\\s:\\{\\}\\[\\]\"]*}$") try self.validate(self.role, name: "role", parent: name, max: 2048) try self.validate(self.role, name: "role", parent: name, min: 20) - 
try self.validate(self.role, name: "role", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):iam::([0-9]+):role/(\\S+)$") + try self.validate(self.role, name: "role", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):iam::([0-9]+):role/(\\S+)$") try self.validate(self.squash, name: "squash", parent: name, max: 15) try self.validate(self.squash, name: "squash", parent: name, min: 5) try self.tags?.forEach { @@ -1134,7 +1134,7 @@ extension StorageGateway { try self.validate(self.invalidUserList, name: "invalidUserList", parent: name, max: 100) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.locationARN, name: "locationARN", parent: name, max: 1400) try self.validate(self.locationARN, name: "locationARN", parent: name, min: 16) try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, max: 100) @@ -1142,7 +1142,7 @@ extension StorageGateway { try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, pattern: "^\\{[\\w\\s:\\{\\}\\[\\]\"]*}$") try self.validate(self.role, name: "role", parent: name, max: 2048) try self.validate(self.role, name: "role", parent: name, min: 20) - try self.validate(self.role, name: "role", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):iam::([0-9]+):role/(\\S+)$") + try self.validate(self.role, name: "role", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):iam::([0-9]+):role/(\\S+)$") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") } @@ -1337,7 +1337,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.networkInterfaceId, name: "networkInterfaceId", parent: name, pattern: "^\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z$") try self.validate(self.snapshotId, name: "snapshotId", parent: name, pattern: "^\\Asnap-([0-9A-Fa-f]{8}|[0-9A-Fa-f]{17})\\z$") try self.tags?.forEach { @@ -1469,7 +1469,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: 
"^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.poolId, name: "poolId", parent: name, max: 100) try self.validate(self.poolId, name: "poolId", parent: name, min: 1) try self.tags?.forEach { @@ -1547,7 +1547,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.numTapesToCreate, name: "numTapesToCreate", parent: name, max: 10) try self.validate(self.numTapesToCreate, name: "numTapesToCreate", parent: name, min: 1) try self.validate(self.poolId, name: "poolId", parent: name, max: 100) @@ -1802,7 +1802,7 @@ extension StorageGateway { public func validate(name: String) throws { try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -1843,7 +1843,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -2240,6 +2240,8 @@ extension StorageGateway { public let nextUpdateAvailabilityDate: String? /// Date after which this gateway will not receive software updates for new features. public let softwareUpdatesEndDate: String? + /// The version number of the software running on the gateway appliance. + public let softwareVersion: String? /// A list of the metadata cache sizes that the gateway can support based on its current hardware specifications. public let supportedGatewayCapacities: [GatewayCapacity]? /// A list of up to 50 tags assigned to the gateway, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ListTagsForResource API operation. @@ -2247,7 +2249,7 @@ extension StorageGateway { /// The configuration settings for the virtual private cloud (VPC) endpoint for your gateway. public let vpcEndpoint: String? - public init(cloudWatchLogGroupARN: String? = nil, deprecationDate: String? = nil, ec2InstanceId: String? = nil, ec2InstanceRegion: String? = nil, endpointType: String? 
= nil, gatewayARN: String? = nil, gatewayCapacity: GatewayCapacity? = nil, gatewayId: String? = nil, gatewayName: String? = nil, gatewayNetworkInterfaces: [NetworkInterface]? = nil, gatewayState: String? = nil, gatewayTimezone: String? = nil, gatewayType: String? = nil, hostEnvironment: HostEnvironment? = nil, hostEnvironmentId: String? = nil, lastSoftwareUpdate: String? = nil, nextUpdateAvailabilityDate: String? = nil, softwareUpdatesEndDate: String? = nil, supportedGatewayCapacities: [GatewayCapacity]? = nil, tags: [Tag]? = nil, vpcEndpoint: String? = nil) { + public init(cloudWatchLogGroupARN: String? = nil, deprecationDate: String? = nil, ec2InstanceId: String? = nil, ec2InstanceRegion: String? = nil, endpointType: String? = nil, gatewayARN: String? = nil, gatewayCapacity: GatewayCapacity? = nil, gatewayId: String? = nil, gatewayName: String? = nil, gatewayNetworkInterfaces: [NetworkInterface]? = nil, gatewayState: String? = nil, gatewayTimezone: String? = nil, gatewayType: String? = nil, hostEnvironment: HostEnvironment? = nil, hostEnvironmentId: String? = nil, lastSoftwareUpdate: String? = nil, nextUpdateAvailabilityDate: String? = nil, softwareUpdatesEndDate: String? = nil, softwareVersion: String? = nil, supportedGatewayCapacities: [GatewayCapacity]? = nil, tags: [Tag]? = nil, vpcEndpoint: String? = nil) { self.cloudWatchLogGroupARN = cloudWatchLogGroupARN self.deprecationDate = deprecationDate self.ec2InstanceId = ec2InstanceId @@ -2266,6 +2268,7 @@ extension StorageGateway { self.lastSoftwareUpdate = lastSoftwareUpdate self.nextUpdateAvailabilityDate = nextUpdateAvailabilityDate self.softwareUpdatesEndDate = softwareUpdatesEndDate + self.softwareVersion = softwareVersion self.supportedGatewayCapacities = supportedGatewayCapacities self.tags = tags self.vpcEndpoint = vpcEndpoint @@ -2290,6 +2293,7 @@ extension StorageGateway { case lastSoftwareUpdate = "LastSoftwareUpdate" case nextUpdateAvailabilityDate = "NextUpdateAvailabilityDate" case softwareUpdatesEndDate = "SoftwareUpdatesEndDate" + case softwareVersion = "SoftwareVersion" case supportedGatewayCapacities = "SupportedGatewayCapacities" case tags = "Tags" case vpcEndpoint = "VPCEndpoint" @@ -2573,7 +2577,7 @@ extension StorageGateway { try self.tapeARNs?.forEach { try validate($0, name: "tapeARNs[]", parent: name, max: 500) try validate($0, name: "tapeARNs[]", parent: name, min: 50) - try validate($0, name: "tapeARNs[]", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try validate($0, name: "tapeARNs[]", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } } @@ -2674,7 +2678,7 @@ extension StorageGateway { try self.tapeARNs?.forEach { try validate($0, name: "tapeARNs[]", parent: name, max: 500) try validate($0, name: "tapeARNs[]", parent: name, min: 50) - try validate($0, name: "tapeARNs[]", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try validate($0, name: "tapeARNs[]", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } } @@ -3573,7 +3577,7 @@ extension StorageGateway { try self.tapeARNs?.forEach { try validate($0, name: "tapeARNs[]", parent: name, max: 500) try validate($0, name: "tapeARNs[]", parent: name, min: 50) - try validate($0, name: "tapeARNs[]", parent: name, pattern: 
"^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try validate($0, name: "tapeARNs[]", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } } @@ -4061,7 +4065,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -4098,7 +4102,7 @@ extension StorageGateway { try self.validate(self.gatewayARN, name: "gatewayARN", parent: name, min: 50) try self.validate(self.tapeARN, name: "tapeARN", parent: name, max: 500) try self.validate(self.tapeARN, name: "tapeARN", parent: name, min: 50) - try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") + try self.validate(self.tapeARN, name: "tapeARN", parent: name, pattern: "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$") } private enum CodingKeys: String, CodingKey { @@ -5117,7 +5121,7 @@ extension StorageGateway { try self.validate(self.fileShareName, name: "fileShareName", parent: name, min: 1) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.nfsFileShareDefaults?.validate(name: "\(name).nfsFileShareDefaults") try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, max: 100) try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, min: 2) @@ -5240,7 +5244,7 @@ extension StorageGateway { try self.validate(self.invalidUserList, name: "invalidUserList", parent: name, max: 100) try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 2048) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 7) - try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") + try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$") try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, max: 100) try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, min: 2) try self.validate(self.notificationPolicy, name: "notificationPolicy", parent: name, pattern: "^\\{[\\w\\s:\\{\\}\\[\\]\"]*}$") diff --git a/Sources/Soto/Services/Textract/Textract_api+async.swift 
b/Sources/Soto/Services/Textract/Textract_api+async.swift index e689fdff57..dc104fb21b 100644 --- a/Sources/Soto/Services/Textract/Textract_api+async.swift +++ b/Sources/Soto/Services/Textract/Textract_api+async.swift @@ -36,11 +36,41 @@ extension Textract { return try await self.client.execute(operation: "AnalyzeID", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Creates an adapter, which can be fine-tuned for enhanced performance on user-provided documents. Takes an AdapterName and FeatureType. Currently the only supported feature type is QUERIES. You can also provide a Description, Tags, and a ClientRequestToken. You can choose whether or not the adapter should be AutoUpdated with the AutoUpdate argument. By default, AutoUpdate is set to DISABLED. + public func createAdapter(_ input: CreateAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateAdapterResponse { + return try await self.client.execute(operation: "CreateAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a new version of an adapter. Operates on a provided AdapterId and a specified dataset provided via the DatasetConfig argument. Requires that you specify an Amazon S3 bucket with the OutputConfig argument. You can provide an optional KMSKeyId, an optional ClientRequestToken, and optional tags. + public func createAdapterVersion(_ input: CreateAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateAdapterVersionResponse { + return try await self.client.execute(operation: "CreateAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon Textract adapter. Takes an AdapterId and deletes the adapter specified by the ID. + public func deleteAdapter(_ input: DeleteAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteAdapterResponse { + return try await self.client.execute(operation: "DeleteAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon Textract adapter version. Requires that you specify both an AdapterId and an AdapterVersion. Deletes the adapter version specified by the AdapterId and the AdapterVersion. + public func deleteAdapterVersion(_ input: DeleteAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeleteAdapterVersionResponse { + return try await self.client.execute(operation: "DeleteAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Detects text in the input document. Amazon Textract can detect lines of text and the words that make up a line of text. The input document must be in one of the following image formats: JPEG, PNG, PDF, or TIFF. DetectDocumentText returns the detected text in an array of Block objects. Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD. DetectDocumentText is a synchronous operation.
To analyze documents asynchronously, use StartDocumentTextDetection. For more information, see Document Text Detection. public func detectDocumentText(_ input: DetectDocumentTextRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DetectDocumentTextResponse { return try await self.client.execute(operation: "DetectDocumentText", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Gets configuration information for an adapter specified by an AdapterId, returning information on AdapterName, Description, CreationTime, AutoUpdate status, and FeatureTypes. + public func getAdapter(_ input: GetAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetAdapterResponse { + return try await self.client.execute(operation: "GetAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets configuration information for the specified adapter version, including: AdapterId, AdapterVersion, FeatureTypes, Status, StatusMessage, DatasetConfig, KMSKeyId, OutputConfig, Tags and EvaluationMetrics. + public func getAdapterVersion(_ input: GetAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetAdapterVersionResponse { + return try await self.client.execute(operation: "GetAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Gets the results for an Amazon Textract asynchronous operation that analyzes text in a document. You start asynchronous text analysis by calling StartDocumentAnalysis, which returns a job identifier (JobId). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentAnalysis. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis. GetDocumentAnalysis returns an array of Block objects. The following types of information are returned: Form data (key-value pairs). The related information is returned in two Block objects, each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value. Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table. Lines and words of text. A LINE Block object contains one or more WORD Block objects. All lines and words that are detected in the document are returned (including text that doesn't have a relationship with the value of the StartDocumentAnalysis FeatureTypes input parameter). Query. A QUERY Block object contains the query text, alias and link to the associated Query results block object. Query Results. A QUERY_RESULT Block object contains the answer to the query and an ID that connects it to the query asked. This Block also contains a confidence score. While processing a document with queries, look out for INVALID_REQUEST_PARAMETERS output. 
This indicates that either the per page query limit has been exceeded or that the operation is trying to query a page in the document which doesn’t exist. Selection elements such as check boxes and option buttons (radio buttons) can be detected in form data and in tables. A SELECTION_ELEMENT Block object contains information about a selection element, including the selection status. Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentAnalysis, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentAnalysis. For more information, see Document Text Analysis. public func getDocumentAnalysis(_ input: GetDocumentAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> GetDocumentAnalysisResponse { return try await self.client.execute(operation: "GetDocumentAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -66,6 +96,21 @@ extension Textract { return try await self.client.execute(operation: "GetLendingAnalysisSummary", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Lists all versions of an adapter that meet the specified filtration criteria. + public func listAdapterVersions(_ input: ListAdapterVersionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListAdapterVersionsResponse { + return try await self.client.execute(operation: "ListAdapterVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all adapters that match the specified filtration criteria. + public func listAdapters(_ input: ListAdaptersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListAdaptersResponse { + return try await self.client.execute(operation: "ListAdapters", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all tags for an Amazon Textract resource. + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ListTagsForResourceResponse { + return try await self.client.execute(operation: "ListTagsForResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Starts the asynchronous analysis of an input document for relationships between detected items such as key-value pairs, tables, and selection elements. StartDocumentAnalysis can analyze text in documents that are in JPEG, PNG, TIFF, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document. StartDocumentAnalysis returns a job identifier (JobId) that you use to get the results of the operation. When text analysis is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel.
To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis. For more information, see Document Text Analysis. public func startDocumentAnalysis(_ input: StartDocumentAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartDocumentAnalysisResponse { return try await self.client.execute(operation: "StartDocumentAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -85,4 +130,68 @@ extension Textract { public func startLendingAnalysis(_ input: StartLendingAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> StartLendingAnalysisResponse { return try await self.client.execute(operation: "StartLendingAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + + /// Adds one or more tags to the specified resource. + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> TagResourceResponse { + return try await self.client.execute(operation: "TagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Removes any tags with the specified keys from the specified resource. + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UntagResourceResponse { + return try await self.client.execute(operation: "UntagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the configuration for an adapter. FeatureTypes configurations cannot be updated. At least one new parameter must be specified as an argument. + public func updateAdapter(_ input: UpdateAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> UpdateAdapterResponse { + return try await self.client.execute(operation: "UpdateAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension Textract { + /// Lists all versions of an adapter that meet the specified filtration criteria. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + public func listAdapterVersionsPaginator( + _ input: ListAdapterVersionsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAdapterVersions, + inputKey: \ListAdapterVersionsRequest.nextToken, + outputKey: \ListAdapterVersionsResponse.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Lists all adapters that match the specified filtration criteria. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + public func listAdaptersPaginator( + _ input: ListAdaptersRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAdapters, + inputKey: \ListAdaptersRequest.nextToken, + outputKey: \ListAdaptersResponse.nextToken, + logger: logger, + on: eventLoop + ) + } } diff --git a/Sources/Soto/Services/Textract/Textract_api.swift b/Sources/Soto/Services/Textract/Textract_api.swift index 71bba539ae..5f08f5dd5e 100644 --- a/Sources/Soto/Services/Textract/Textract_api.swift +++ b/Sources/Soto/Services/Textract/Textract_api.swift @@ -90,11 +90,41 @@ public struct Textract: AWSService { return self.client.execute(operation: "AnalyzeID", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Creates an adapter, which can be fine-tuned for enhanced performance on user-provided documents. Takes an AdapterName and FeatureType. Currently the only supported feature type is QUERIES. You can also provide a Description, Tags, and a ClientRequestToken. You can choose whether or not the adapter should be AutoUpdated with the AutoUpdate argument. By default, AutoUpdate is set to DISABLED. + public func createAdapter(_ input: CreateAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Creates a new version of an adapter. Operates on a provided AdapterId and a specified dataset provided via the DatasetConfig argument. Requires that you specify an Amazon S3 bucket with the OutputConfig argument. You can provide an optional KMSKeyId, an optional ClientRequestToken, and optional tags. + public func createAdapterVersion(_ input: CreateAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "CreateAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon Textract adapter. Takes an AdapterId and deletes the adapter specified by the ID. + public func deleteAdapter(_ input: DeleteAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Deletes an Amazon Textract adapter version. Requires that you specify both an AdapterId and an AdapterVersion. Deletes the adapter version specified by the AdapterId and the AdapterVersion. + public func deleteAdapterVersion(_ input: DeleteAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeleteAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Detects text in the input document. Amazon Textract can detect lines of text and the words that make up a line of text.
The input document must be in one of the following image formats: JPEG, PNG, PDF, or TIFF. DetectDocumentText returns the detected text in an array of Block objects. Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD. DetectDocumentText is a synchronous operation. To analyze documents asynchronously, use StartDocumentTextDetection. For more information, see Document Text Detection. public func detectDocumentText(_ input: DetectDocumentTextRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DetectDocumentText", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Gets configuration information for an adapter specified by an AdapterId, returning information on AdapterName, Description, CreationTime, AutoUpdate status, and FeatureTypes. + public func getAdapter(_ input: GetAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Gets configuration information for the specified adapter version, including: AdapterId, AdapterVersion, FeatureTypes, Status, StatusMessage, DatasetConfig, KMSKeyId, OutputConfig, Tags and EvaluationMetrics. + public func getAdapterVersion(_ input: GetAdapterVersionRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "GetAdapterVersion", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Gets the results for an Amazon Textract asynchronous operation that analyzes text in a document. You start asynchronous text analysis by calling StartDocumentAnalysis, which returns a job identifier (JobId). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartDocumentAnalysis. To get the results of the text-detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis. GetDocumentAnalysis returns an array of Block objects. The following types of information are returned: Form data (key-value pairs). The related information is returned in two Block objects, each of type KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva Carolina is the value. Table and table cell data. A TABLE Block object contains information about a detected table. A CELL Block object is returned for each cell in a table. Lines and words of text. A LINE Block object contains one or more WORD Block objects. All lines and words that are detected in the document are returned (including text that doesn't have a relationship with the value of the StartDocumentAnalysis FeatureTypes input parameter). Query.
A QUERY Block object contains the query text, alias and link to the associated Query results block object. Query Results. A QUERY_RESULT Block object contains the answer to the query and an ID that connects it to the query asked. This Block also contains a confidence score. While processing a document with queries, look out for INVALID_REQUEST_PARAMETERS output. This indicates that either the per page query limit has been exceeded or that the operation is trying to query a page in the document which doesn’t exist. Selection elements such as check boxes and option buttons (radio buttons) can be detected in form data and in tables. A SELECTION_ELEMENT Block object contains information about a selection element, including the selection status. Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetDocumentAnalysis, and populate the NextToken request parameter with the token value that's returned from the previous call to GetDocumentAnalysis. For more information, see Document Text Analysis. public func getDocumentAnalysis(_ input: GetDocumentAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "GetDocumentAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -120,6 +150,21 @@ public struct Textract: AWSService { return self.client.execute(operation: "GetLendingAnalysisSummary", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Lists all versions of an adapter that meet the specified filtration criteria. + public func listAdapterVersions(_ input: ListAdapterVersionsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListAdapterVersions", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all adapters that match the specified filtration criteria. + public func listAdapters(_ input: ListAdaptersRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListAdapters", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Lists all tags for an Amazon Textract resource. + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "ListTagsForResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Starts the asynchronous analysis of an input document for relationships between detected items such as key-value pairs, tables, and selection elements. StartDocumentAnalysis can analyze text in documents that are in JPEG, PNG, TIFF, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document. StartDocumentAnalysis returns a job identifier (JobId) that you use to get the results of the operation.
When text analysis is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentAnalysis, and pass the job identifier (JobId) from the initial call to StartDocumentAnalysis. For more information, see Document Text Analysis. public func startDocumentAnalysis(_ input: StartDocumentAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "StartDocumentAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -139,6 +184,21 @@ public struct Textract: AWSService { public func startLendingAnalysis(_ input: StartLendingAnalysisRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "StartLendingAnalysis", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + + /// Adds one or more tags to the specified resource. + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "TagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Removes any tags with the specified keys from the specified resource. + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UntagResource", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Updates the configuration for an adapter. FeatureTypes configurations cannot be updated. At least one new parameter must be specified as an argument. + public func updateAdapter(_ input: UpdateAdapterRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "UpdateAdapter", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } } extension Textract { @@ -149,3 +209,136 @@ extension Textract { self.config = from.config.with(patch: patch) } } + +// MARK: Paginators + +extension Textract { + /// Lists all versions of an adapter that meet the specified filtration criteria. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue.
+ public func listAdapterVersionsPaginator( + _ input: ListAdapterVersionsRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListAdapterVersionsResponse, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listAdapterVersions, + inputKey: \ListAdapterVersionsRequest.nextToken, + outputKey: \ListAdapterVersionsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listAdapterVersionsPaginator( + _ input: ListAdapterVersionsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (ListAdapterVersionsResponse, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listAdapterVersions, + inputKey: \ListAdapterVersionsRequest.nextToken, + outputKey: \ListAdapterVersionsResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Lists all adapters that match the specified filtration criteria. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func listAdaptersPaginator( + _ input: ListAdaptersRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, ListAdaptersResponse, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.listAdapters, + inputKey: \ListAdaptersRequest.nextToken, + outputKey: \ListAdaptersResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func listAdaptersPaginator( + _ input: ListAdaptersRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop?
= nil, + onPage: @escaping (ListAdaptersResponse, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.listAdapters, + inputKey: \ListAdaptersRequest.nextToken, + outputKey: \ListAdaptersResponse.nextToken, + on: eventLoop, + onPage: onPage + ) + } +} + +extension Textract.ListAdapterVersionsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Textract.ListAdapterVersionsRequest { + return .init( + adapterId: self.adapterId, + afterCreationTime: self.afterCreationTime, + beforeCreationTime: self.beforeCreationTime, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension Textract.ListAdaptersRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Textract.ListAdaptersRequest { + return .init( + afterCreationTime: self.afterCreationTime, + beforeCreationTime: self.beforeCreationTime, + maxResults: self.maxResults, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/Textract/Textract_shapes.swift b/Sources/Soto/Services/Textract/Textract_shapes.swift index 7e7a866dcf..c2bed2b575 100644 --- a/Sources/Soto/Services/Textract/Textract_shapes.swift +++ b/Sources/Soto/Services/Textract/Textract_shapes.swift @@ -26,6 +26,21 @@ import SotoCore extension Textract { // MARK: Enums + public enum AdapterVersionStatus: String, CustomStringConvertible, Codable, Sendable { + case active = "ACTIVE" + case atRisk = "AT_RISK" + case creationError = "CREATION_ERROR" + case creationInProgress = "CREATION_IN_PROGRESS" + case deprecated = "DEPRECATED" + public var description: String { return self.rawValue } + } + + public enum AutoUpdate: String, CustomStringConvertible, Codable, Sendable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum BlockType: String, CustomStringConvertible, Codable, Sendable { case cell = "CELL" case keyValueSet = "KEY_VALUE_SET" @@ -122,17 +137,170 @@ extension Textract { // MARK: Shapes + public struct Adapter: AWSEncodableShape { + /// A unique identifier for the adapter resource. + public let adapterId: String + /// Pages is a parameter that the user inputs to specify which pages to apply an adapter to. The following is a list of rules for using this parameter. If a page is not specified, it is set to ["1"] by default. The following characters are allowed in the parameter's string: 0 1 2 3 4 5 6 7 8 9 - *. No whitespace is allowed. When using * to indicate all pages, it must be the only element in the list. You can use page intervals, such as ["1-3", "1-1", "4-*"]. Where * indicates last page of document. Specified pages must be greater than 0 and less than or equal to the number of pages in the document. + public let pages: [String]? + /// A string that identifies the version of the adapter. + public let version: String + + public init(adapterId: String, pages: [String]? 
= nil, version: String) { + self.adapterId = adapterId + self.pages = pages + self.version = version + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.pages?.forEach { + try validate($0, name: "pages[]", parent: name, max: 9) + try validate($0, name: "pages[]", parent: name, min: 1) + try validate($0, name: "pages[]", parent: name, pattern: "^[0-9\\*\\-]+$") + } + try self.validate(self.pages, name: "pages", parent: name, min: 1) + try self.validate(self.version, name: "version", parent: name, max: 128) + try self.validate(self.version, name: "version", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case pages = "Pages" + case version = "Version" + } + } + + public struct AdapterOverview: AWSDecodableShape { + /// A unique identifier for the adapter resource. + public let adapterId: String? + /// A string naming the adapter resource. + public let adapterName: String? + /// The date and time that the adapter was created. + public let creationTime: Date? + /// The feature types that the adapter is operating on. + public let featureTypes: [FeatureType]? + + public init(adapterId: String? = nil, adapterName: String? = nil, creationTime: Date? = nil, featureTypes: [FeatureType]? = nil) { + self.adapterId = adapterId + self.adapterName = adapterName + self.creationTime = creationTime + self.featureTypes = featureTypes + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterName = "AdapterName" + case creationTime = "CreationTime" + case featureTypes = "FeatureTypes" + } + } + + public struct AdapterVersionDatasetConfig: AWSEncodableShape & AWSDecodableShape { + public let manifestS3Object: S3Object? + + public init(manifestS3Object: S3Object? = nil) { + self.manifestS3Object = manifestS3Object + } + + public func validate(name: String) throws { + try self.manifestS3Object?.validate(name: "\(name).manifestS3Object") + } + + private enum CodingKeys: String, CodingKey { + case manifestS3Object = "ManifestS3Object" + } + } + + public struct AdapterVersionEvaluationMetric: AWSDecodableShape { + /// The F1 score, precision, and recall metrics for the baseline model. + public let adapterVersion: EvaluationMetric? + /// The F1 score, precision, and recall metrics for the baseline model. + public let baseline: EvaluationMetric? + /// Indicates the feature type being analyzed by a given adapter version. + public let featureType: FeatureType? + + public init(adapterVersion: EvaluationMetric? = nil, baseline: EvaluationMetric? = nil, featureType: FeatureType? = nil) { + self.adapterVersion = adapterVersion + self.baseline = baseline + self.featureType = featureType + } + + private enum CodingKeys: String, CodingKey { + case adapterVersion = "AdapterVersion" + case baseline = "Baseline" + case featureType = "FeatureType" + } + } + + public struct AdapterVersionOverview: AWSDecodableShape { + /// A unique identifier for the adapter associated with a given adapter version. + public let adapterId: String? + /// An identified for a given adapter version. + public let adapterVersion: String? + /// The date and time that a given adapter version was created. + public let creationTime: Date? + /// The feature types that the adapter version is operating on. + public let featureTypes: [FeatureType]? 
+ /// Contains information on the status of a given adapter version. + public let status: AdapterVersionStatus? + /// A message explaining the status of a given adapter version. + public let statusMessage: String? + + public init(adapterId: String? = nil, adapterVersion: String? = nil, creationTime: Date? = nil, featureTypes: [FeatureType]? = nil, status: AdapterVersionStatus? = nil, statusMessage: String? = nil) { + self.adapterId = adapterId + self.adapterVersion = adapterVersion + self.creationTime = creationTime + self.featureTypes = featureTypes + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterVersion = "AdapterVersion" + case creationTime = "CreationTime" + case featureTypes = "FeatureTypes" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct AdaptersConfig: AWSEncodableShape { + /// A list of adapters to be used when analyzing the specified document. + public let adapters: [Adapter] + + public init(adapters: [Adapter]) { + self.adapters = adapters + } + + public func validate(name: String) throws { + try self.adapters.forEach { + try $0.validate(name: "\(name).adapters[]") + } + try self.validate(self.adapters, name: "adapters", parent: name, max: 100) + try self.validate(self.adapters, name: "adapters", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case adapters = "Adapters" + } + } + public struct AnalyzeDocumentRequest: AWSEncodableShape { + /// Specifies the adapter to be used when analyzing a document. + public let adaptersConfig: AdaptersConfig? /// The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Textract operations, you can't pass image bytes. The document must be an image in JPEG, PNG, PDF, or TIFF format. If you're using an AWS SDK to call Amazon Textract, you might not need to base64-encode image bytes that are passed using the Bytes field. public let document: Document - /// A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list to return information about the layout of the document. To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures within the document and within form data and table data, add SIGNATURES to either TABLES or FORMS. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes). + /// A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list to return information about the layout of the document. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes). public let featureTypes: [FeatureType] /// Sets the configuration for the human in the loop workflow for analyzing documents. public let humanLoopConfig: HumanLoopConfig? /// Contains Queries and the alias for those Queries, as determined by the input. public let queriesConfig: QueriesConfig?
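To show how the new adaptersConfig parameter plugs into an existing call, here is a hedged sketch of AnalyzeDocument with a single adapter applied to every page. The adapter ID and version, the S3 location, the query text, and the async analyzeDocument variant are assumptions made for illustration.

import SotoTextract

func analyzeWithAdapter(_ textract: Textract, adapterId: String, adapterVersion: String) async throws {
    let request = Textract.AnalyzeDocumentRequest(
        adaptersConfig: Textract.AdaptersConfig(adapters: [
            // "*" applies the adapter to all pages; it must be the only element when used.
            Textract.Adapter(adapterId: adapterId, pages: ["*"], version: adapterVersion)
        ]),
        document: Textract.Document(s3Object: Textract.S3Object(bucket: "my-bucket", name: "invoice.png")),
        featureTypes: [.queries],
        queriesConfig: Textract.QueriesConfig(queries: [Textract.Query(text: "What is the invoice total?")])
    )
    let response = try await textract.analyzeDocument(request)
    for block in response.blocks ?? [] where block.blockType == .queryResult {
        print(block.text ?? "")
    }
}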
- public init(document: Document, featureTypes: [FeatureType], humanLoopConfig: HumanLoopConfig? = nil, queriesConfig: QueriesConfig? = nil) { + public init(adaptersConfig: AdaptersConfig? = nil, document: Document, featureTypes: [FeatureType], humanLoopConfig: HumanLoopConfig? = nil, queriesConfig: QueriesConfig? = nil) { + self.adaptersConfig = adaptersConfig self.document = document self.featureTypes = featureTypes self.humanLoopConfig = humanLoopConfig @@ -140,12 +308,14 @@ extension Textract { } public func validate(name: String) throws { + try self.adaptersConfig?.validate(name: "\(name).adaptersConfig") try self.document.validate(name: "\(name).document") try self.humanLoopConfig?.validate(name: "\(name).humanLoopConfig") try self.queriesConfig?.validate(name: "\(name).queriesConfig") } private enum CodingKeys: String, CodingKey { + case adaptersConfig = "AdaptersConfig" case document = "Document" case featureTypes = "FeatureTypes" case humanLoopConfig = "HumanLoopConfig" @@ -273,7 +443,7 @@ extension Textract { } public struct Block: AWSDecodableShape { - /// The type of text item that's recognized. In operations for text detection, the following types are returned: PAGE - Contains a list of the LINE Block objects that are detected on a document page. WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. In text analysis operations, the following types are returned: PAGE - Contains a list of child Block objects that are detected on a document page. KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object. WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each. TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table. TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table. CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell. MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells. SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element. SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell. QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer. QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score. + /// The type of text item that's recognized. 
In operations for text detection, the following types are returned: PAGE - Contains a list of the LINE Block objects that are detected on a document page. WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. In text analysis operations, the following types are returned: PAGE - Contains a list of child Block objects that are detected on a document page. KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object. WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page. TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each. TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table. TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table. CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell. MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells. SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element. SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell. QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer. QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score. The following BlockTypes are only returned for Amazon Textract Layout. LAYOUT_TITLE - The main title of the document. LAYOUT_HEADER - Text located in the top margin of the document. LAYOUT_FOOTER - Text located in the bottom margin of the document. LAYOUT_SECTION_HEADER - The titles of sections within a document. LAYOUT_PAGE_NUMBER - The page number of the documents. LAYOUT_LIST - Any information grouped together in list form. LAYOUT_FIGURE - Indicates the location of an image in a document. LAYOUT_TABLE - Indicates the location of a table in the document. LAYOUT_KEY_VALUE - Indicates the location of form key-values in a document. LAYOUT_TEXT - Text that is present typically as a part of paragraphs in documents. public let blockType: BlockType? /// The column in which a table cell appears. The first column position is 1. ColumnIndex isn't returned by DetectDocumentText and GetDocumentTextDetection. public let columnIndex: Int? @@ -365,6 +535,192 @@ extension Textract { } } + public struct CreateAdapterRequest: AWSEncodableShape { + /// The name to be assigned to the adapter being created. 
+ public let adapterName: String + /// Controls whether or not the adapter should automatically update. + public let autoUpdate: AutoUpdate? + /// Idempotent token is used to recognize the request. If the same token is used with multiple CreateAdapter requests, the same session is returned. This token is employed to avoid unintentionally creating the same session multiple times. + public let clientRequestToken: String? + /// The description to be assigned to the adapter being created. + public let description: String? + /// The type of feature that the adapter is being trained on. Currently, supported feature types are: QUERIES + public let featureTypes: [FeatureType] + /// A list of tags to be added to the adapter. + public let tags: [String: String]? + + public init(adapterName: String, autoUpdate: AutoUpdate? = nil, clientRequestToken: String? = CreateAdapterRequest.idempotencyToken(), description: String? = nil, featureTypes: [FeatureType], tags: [String: String]? = nil) { + self.adapterName = adapterName + self.autoUpdate = autoUpdate + self.clientRequestToken = clientRequestToken + self.description = description + self.featureTypes = featureTypes + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.adapterName, name: "adapterName", parent: name, max: 128) + try self.validate(self.adapterName, name: "adapterName", parent: name, min: 1) + try self.validate(self.adapterName, name: "adapterName", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.validate(self.description, name: "description", parent: name, max: 256) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s!\"\\#\\$%'&\\(\\)\\*\\+\\,\\-\\./:;=\\?@\\[\\\\\\]\\^_`\\{\\|\\}~><]+$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case adapterName = "AdapterName" + case autoUpdate = "AutoUpdate" + case clientRequestToken = "ClientRequestToken" + case description = "Description" + case featureTypes = "FeatureTypes" + case tags = "Tags" + } + } + + public struct CreateAdapterResponse: AWSDecodableShape { + /// A string containing the unique ID for the adapter that has been created. + public let adapterId: String? + + public init(adapterId: String? = nil) { + self.adapterId = adapterId + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + } + } + + public struct CreateAdapterVersionRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter that will receive a new version. + public let adapterId: String + /// Idempotent token is used to recognize the request.
If the same token is used with multiple CreateAdapterVersion requests, the same session is returned. This token is employed to avoid unintentionally creating the same session multiple times. + public let clientRequestToken: String? + /// Specifies a dataset used to train a new adapter version. Takes a ManifestS3Object as the value. + public let datasetConfig: AdapterVersionDatasetConfig + /// The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt your documents. + public let kmsKeyId: String? + public let outputConfig: OutputConfig + /// A set of tags (key-value pairs) that you want to attach to the adapter version. + public let tags: [String: String]? + + public init(adapterId: String, clientRequestToken: String? = CreateAdapterVersionRequest.idempotencyToken(), datasetConfig: AdapterVersionDatasetConfig, kmsKeyId: String? = nil, outputConfig: OutputConfig, tags: [String: String]? = nil) { + self.adapterId = adapterId + self.clientRequestToken = clientRequestToken + self.datasetConfig = datasetConfig + self.kmsKeyId = kmsKeyId + self.outputConfig = outputConfig + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) + try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.datasetConfig.validate(name: "\(name).datasetConfig") + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$") + try self.outputConfig.validate(name: "\(name).outputConfig") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case clientRequestToken = "ClientRequestToken" + case datasetConfig = "DatasetConfig" + case kmsKeyId = "KMSKeyId" + case outputConfig = "OutputConfig" + case tags = "Tags" + } + } + + public struct CreateAdapterVersionResponse: AWSDecodableShape { + /// A string containing the unique ID for the adapter that has received a new version. + public let adapterId: String? + /// A string describing the new version of the adapter. + public let adapterVersion: String? + + public init(adapterId: String? = nil, adapterVersion: String? = nil) { + self.adapterId = adapterId + self.adapterVersion = adapterVersion + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterVersion = "AdapterVersion" + } + } + + public struct DeleteAdapterRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter to be deleted. 
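The creation flow ties the two request shapes above together: create the adapter, then train a version from an S3 manifest. A hedged sketch, with the bucket names and the adapter name as placeholders and the async method variants assumed to be generated alongside these shapes:

import SotoTextract

func createAdapterWithFirstVersion(_ textract: Textract) async throws -> Textract.CreateAdapterVersionResponse? {
    let created = try await textract.createAdapter(Textract.CreateAdapterRequest(
        adapterName: "invoice-queries-adapter",
        autoUpdate: .enabled,
        description: "Adapter tuned for invoice queries",
        featureTypes: [.queries]
    ))
    guard let adapterId = created.adapterId else { return nil }

    // Train the first version from a manifest of annotated documents stored in S3.
    return try await textract.createAdapterVersion(Textract.CreateAdapterVersionRequest(
        adapterId: adapterId,
        datasetConfig: Textract.AdapterVersionDatasetConfig(
            manifestS3Object: Textract.S3Object(bucket: "my-training-bucket", name: "manifests/train.jsonl")
        ),
        outputConfig: Textract.OutputConfig(s3Bucket: "my-output-bucket", s3Prefix: "adapter-training")
    ))
}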
+ public let adapterId: String + + public init(adapterId: String) { + self.adapterId = adapterId + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + } + } + + public struct DeleteAdapterResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAdapterVersionRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter version that will be deleted. + public let adapterId: String + /// Specifies the adapter version to be deleted. + public let adapterVersion: String + + public init(adapterId: String, adapterVersion: String) { + self.adapterId = adapterId + self.adapterVersion = adapterVersion + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.validate(self.adapterVersion, name: "adapterVersion", parent: name, max: 128) + try self.validate(self.adapterVersion, name: "adapterVersion", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterVersion = "AdapterVersion" + } + } + + public struct DeleteAdapterVersionResponse: AWSDecodableShape { + public init() {} + } + public struct DetectDocumentTextRequest: AWSEncodableShape { /// The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI to call Amazon Textract operations, you can't pass image bytes. The document must be an image in JPEG or PNG format. If you're using an AWS SDK to call Amazon Textract, you might not need to base64-encode image bytes that are passed using the Bytes field. public let document: Document @@ -493,6 +849,27 @@ extension Textract { } } + public struct EvaluationMetric: AWSDecodableShape { + /// The F1 score for an adapter version. + public let f1Score: Float? + /// The Precision score for an adapter version. + public let precision: Float? + /// The Recall score for an adapter version. + public let recall: Float? + + public init(f1Score: Float? = nil, precision: Float? = nil, recall: Float? = nil) { + self.f1Score = f1Score + self.precision = precision + self.recall = recall + } + + private enum CodingKeys: String, CodingKey { + case f1Score = "F1Score" + case precision = "Precision" + case recall = "Recall" + } + } + public struct ExpenseCurrency: AWSDecodableShape { /// Currency code for detected currency. the current supported codes are: USD EUR GBP CAD INR JPY CHF AUD CNY BZR SEK HKD public let code: String? @@ -658,6 +1035,137 @@ extension Textract { } } + public struct GetAdapterRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter. + public let adapterId: String + + public init(adapterId: String) { + self.adapterId = adapterId + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + } + } + + public struct GetAdapterResponse: AWSDecodableShape { + /// A string identifying the adapter that information has been retrieved for. + public let adapterId: String? + /// The name of the requested adapter. 
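A short cleanup sketch that reads the adapter with GetAdapter, removes one of its versions, and then deletes the adapter itself; the async variants of these operations are assumed:

import SotoTextract

func deleteAdapterVersionThenAdapter(_ textract: Textract, adapterId: String, version: String) async throws {
    let info = try await textract.getAdapter(Textract.GetAdapterRequest(adapterId: adapterId))
    print("Deleting \(info.adapterName ?? adapterId), version \(version)")
    _ = try await textract.deleteAdapterVersion(
        Textract.DeleteAdapterVersionRequest(adapterId: adapterId, adapterVersion: version)
    )
    _ = try await textract.deleteAdapter(Textract.DeleteAdapterRequest(adapterId: adapterId))
}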
+ public let adapterName: String? + /// Binary value indicating if the adapter is being automatically updated or not. + public let autoUpdate: AutoUpdate? + /// The date and time the requested adapter was created at. + public let creationTime: Date? + /// The description for the requested adapter. + public let description: String? + /// List of the targeted feature types for the requested adapter. + public let featureTypes: [FeatureType]? + /// A set of tags (key-value pairs) associated with the adapter that has been retrieved. + public let tags: [String: String]? + + public init(adapterId: String? = nil, adapterName: String? = nil, autoUpdate: AutoUpdate? = nil, creationTime: Date? = nil, description: String? = nil, featureTypes: [FeatureType]? = nil, tags: [String: String]? = nil) { + self.adapterId = adapterId + self.adapterName = adapterName + self.autoUpdate = autoUpdate + self.creationTime = creationTime + self.description = description + self.featureTypes = featureTypes + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterName = "AdapterName" + case autoUpdate = "AutoUpdate" + case creationTime = "CreationTime" + case description = "Description" + case featureTypes = "FeatureTypes" + case tags = "Tags" + } + } + + public struct GetAdapterVersionRequest: AWSEncodableShape { + /// A string specifying a unique ID for the adapter version you want to retrieve information for. + public let adapterId: String + /// A string specifying the adapter version you want to retrieve information for. + public let adapterVersion: String + + public init(adapterId: String, adapterVersion: String) { + self.adapterId = adapterId + self.adapterVersion = adapterVersion + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.validate(self.adapterVersion, name: "adapterVersion", parent: name, max: 128) + try self.validate(self.adapterVersion, name: "adapterVersion", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterVersion = "AdapterVersion" + } + } + + public struct GetAdapterVersionResponse: AWSDecodableShape { + /// A string containing a unique ID for the adapter version being retrieved. + public let adapterId: String? + /// A string containing the adapter version that has been retrieved. + public let adapterVersion: String? + /// The time that the adapter version was created. + public let creationTime: Date? + /// Specifies a dataset used to train a new adapter version. Takes a ManifestS3Object as the value. + public let datasetConfig: AdapterVersionDatasetConfig? + /// The evaluation metrics (F1 score, Precision, and Recall) for the requested version, grouped by baseline metrics and adapter version. + public let evaluationMetrics: [AdapterVersionEvaluationMetric]? + /// List of the targeted feature types for the requested adapter version. + public let featureTypes: [FeatureType]? + /// The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt your documents. + public let kmsKeyId: String? + public let outputConfig: OutputConfig? + /// The status of the adapter version that has been requested. + public let status: AdapterVersionStatus? + /// A message that describes the status of the requested adapter version. + public let statusMessage: String?
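Because adapter training is asynchronous, GetAdapterVersion is the natural polling target. A hedged sketch that waits for a terminal status and prints the evaluation metrics; the 30-second interval and the async getAdapterVersion variant are assumptions:

import SotoTextract

func waitForAdapterVersion(_ textract: Textract, adapterId: String, version: String) async throws {
    while true {
        let response = try await textract.getAdapterVersion(
            Textract.GetAdapterVersionRequest(adapterId: adapterId, adapterVersion: version)
        )
        switch response.status {
        case .creationInProgress, .none:
            try await Task.sleep(nanoseconds: 30_000_000_000) // still training, poll again in 30s
        case .creationError:
            print("Training failed: \(response.statusMessage ?? "no status message")")
            return
        default: // ACTIVE, AT_RISK or DEPRECATED
            for metric in response.evaluationMetrics ?? [] {
                print("\(metric.featureType?.rawValue ?? "?"): F1 \(metric.adapterVersion?.f1Score ?? 0)")
            }
            return
        }
    }
}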
+ /// A set of tags (key-value pairs) that are associated with the adapter version. + public let tags: [String: String]? + + public init(adapterId: String? = nil, adapterVersion: String? = nil, creationTime: Date? = nil, datasetConfig: AdapterVersionDatasetConfig? = nil, evaluationMetrics: [AdapterVersionEvaluationMetric]? = nil, featureTypes: [FeatureType]? = nil, kmsKeyId: String? = nil, outputConfig: OutputConfig? = nil, status: AdapterVersionStatus? = nil, statusMessage: String? = nil, tags: [String: String]? = nil) { + self.adapterId = adapterId + self.adapterVersion = adapterVersion + self.creationTime = creationTime + self.datasetConfig = datasetConfig + self.evaluationMetrics = evaluationMetrics + self.featureTypes = featureTypes + self.kmsKeyId = kmsKeyId + self.outputConfig = outputConfig + self.status = status + self.statusMessage = statusMessage + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterVersion = "AdapterVersion" + case creationTime = "CreationTime" + case datasetConfig = "DatasetConfig" + case evaluationMetrics = "EvaluationMetrics" + case featureTypes = "FeatureTypes" + case kmsKeyId = "KMSKeyId" + case outputConfig = "OutputConfig" + case status = "Status" + case statusMessage = "StatusMessage" + case tags = "Tags" + } + } + public struct GetDocumentAnalysisRequest: AWSEncodableShape { /// A unique identifier for the text-detection job. The JobId is returned from StartDocumentAnalysis. A JobId value is only valid for 7 days. public let jobId: String @@ -677,7 +1185,7 @@ extension Textract { try self.validate(self.jobId, name: "jobId", parent: name, min: 1) try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-zA-Z0-9-_]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") } @@ -744,7 +1252,7 @@ extension Textract { try self.validate(self.jobId, name: "jobId", parent: name, min: 1) try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-zA-Z0-9-_]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") } @@ -811,7 +1319,7 @@ extension Textract { try self.validate(self.jobId, name: "jobId", parent: name, min: 1) try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-zA-Z0-9-_]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") } @@ -879,7 +1387,7 @@ extension Textract { try self.validate(self.jobId, name: "jobId", parent: name, min: 1) try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-zA-Z0-9-_]+$") try 
self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") } @@ -1210,6 +1718,141 @@ extension Textract { } } + public struct ListAdapterVersionsRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter to match for when listing adapter versions. + public let adapterId: String? + /// Specifies the lower bound for the ListAdapterVersions operation. Ensures ListAdapterVersions returns only adapter versions created after the specified creation time. + public let afterCreationTime: Date? + /// Specifies the upper bound for the ListAdapterVersions operation. Ensures ListAdapterVersions returns only adapter versions created before the specified creation time. + public let beforeCreationTime: Date? + /// The maximum number of results to return when listing adapter versions. + public let maxResults: Int? + /// Identifies the next page of results to return when listing adapter versions. + public let nextToken: String? + + public init(adapterId: String? = nil, afterCreationTime: Date? = nil, beforeCreationTime: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.adapterId = adapterId + self.afterCreationTime = afterCreationTime + self.beforeCreationTime = beforeCreationTime + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case afterCreationTime = "AfterCreationTime" + case beforeCreationTime = "BeforeCreationTime" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct ListAdapterVersionsResponse: AWSDecodableShape { + /// Adapter versions that match the filtering criteria specified when calling ListAdapterVersions. + public let adapterVersions: [AdapterVersionOverview]? + /// Identifies the next page of results to return when listing adapter versions. + public let nextToken: String? + + public init(adapterVersions: [AdapterVersionOverview]? = nil, nextToken: String? = nil) { + self.adapterVersions = adapterVersions + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case adapterVersions = "AdapterVersions" + case nextToken = "NextToken" + } + } + + public struct ListAdaptersRequest: AWSEncodableShape { + /// Specifies the lower bound for the ListAdapters operation. Ensures ListAdapters returns only adapters created after the specified creation time. + public let afterCreationTime: Date? + /// Specifies the upper bound for the ListAdapters operation. Ensures ListAdapters returns only adapters created before the specified creation time. + public let beforeCreationTime: Date? + /// The maximum number of results to return when listing adapters.
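The creation-time filters make it easy to audit recent training runs. A sketch that walks every page manually with nextToken, assuming the async listAdapterVersions variant; the 30-day window is arbitrary:

import Foundation
import SotoTextract

func recentAdapterVersions(_ textract: Textract, adapterId: String) async throws -> [Textract.AdapterVersionOverview] {
    var collected: [Textract.AdapterVersionOverview] = []
    var nextToken: String?
    repeat {
        let page = try await textract.listAdapterVersions(Textract.ListAdapterVersionsRequest(
            adapterId: adapterId,
            afterCreationTime: Date().addingTimeInterval(-30 * 24 * 60 * 60), // last 30 days
            maxResults: 50,
            nextToken: nextToken
        ))
        collected += page.adapterVersions ?? []
        nextToken = page.nextToken
    } while nextToken != nil
    return collected
}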
+ public let maxResults: Int? + /// Identifies the next page of results to return when listing adapters. + public let nextToken: String? + + public init(afterCreationTime: Date? = nil, beforeCreationTime: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.afterCreationTime = afterCreationTime + self.beforeCreationTime = beforeCreationTime + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S") + } + + private enum CodingKeys: String, CodingKey { + case afterCreationTime = "AfterCreationTime" + case beforeCreationTime = "BeforeCreationTime" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct ListAdaptersResponse: AWSDecodableShape { + /// A list of adapters that matches the filtering criteria specified when calling ListAdapters. + public let adapters: [AdapterOverview]? + /// Identifies the next page of results to return when listing adapters. + public let nextToken: String? + + public init(adapters: [AdapterOverview]? = nil, nextToken: String? = nil) { + self.adapters = adapters + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case adapters = "Adapters" + case nextToken = "NextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that specifies the resource to list tags for. + public let resourceARN: String + + public init(resourceARN: String) { + self.resourceARN = resourceARN + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// A set of tags (key-value pairs) that are part of the requested resource. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + public struct NormalizedValue: AWSDecodableShape { /// The value of the date, written as Year-Month-DayTHour:Minute:Second. public let value: String? @@ -1253,7 +1896,7 @@ extension Textract { } } - public struct OutputConfig: AWSEncodableShape { + public struct OutputConfig: AWSEncodableShape & AWSDecodableShape { /// The name of the bucket your output will go to. public let s3Bucket: String /// The prefix of the object key that the output will be saved to. When not enabled, the prefix will be “textract_output". @@ -1402,7 +2045,7 @@ extension Textract { } } - public struct S3Object: AWSEncodableShape { + public struct S3Object: AWSEncodableShape & AWSDecodableShape { /// The name of the S3 bucket. Note that the # character is not valid in the file name. public let bucket: String? /// The file name of the input document. Synchronous operations can use image files that are in JPEG or PNG format. Asynchronous operations also support PDF and TIFF format files. 
@@ -1469,6 +2112,8 @@ extension Textract { } public struct StartDocumentAnalysisRequest: AWSEncodableShape { + /// Specifies the adapter to be used when analyzing a document. + public let adaptersConfig: AdaptersConfig? /// The idempotent token that you use to identify the start request. If you use the same token with multiple StartDocumentAnalysis requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidentally started more than once. For more information, see Calling Amazon Textract Asynchronous Operations. public let clientRequestToken: String? /// The location of the document to be processed. @@ -1485,7 +2130,8 @@ extension Textract { public let outputConfig: OutputConfig? public let queriesConfig: QueriesConfig? - public init(clientRequestToken: String? = nil, documentLocation: DocumentLocation, featureTypes: [FeatureType], jobTag: String? = nil, kmsKeyId: String? = nil, notificationChannel: NotificationChannel? = nil, outputConfig: OutputConfig? = nil, queriesConfig: QueriesConfig? = nil) { + public init(adaptersConfig: AdaptersConfig? = nil, clientRequestToken: String? = nil, documentLocation: DocumentLocation, featureTypes: [FeatureType], jobTag: String? = nil, kmsKeyId: String? = nil, notificationChannel: NotificationChannel? = nil, outputConfig: OutputConfig? = nil, queriesConfig: QueriesConfig? = nil) { + self.adaptersConfig = adaptersConfig self.clientRequestToken = clientRequestToken self.documentLocation = documentLocation self.featureTypes = featureTypes @@ -1497,6 +2143,7 @@ extension Textract { } public func validate(name: String) throws { + try self.adaptersConfig?.validate(name: "\(name).adaptersConfig") try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1) try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-zA-Z0-9-_]+$") @@ -1513,6 +2160,7 @@ extension Textract { } private enum CodingKeys: String, CodingKey { + case adaptersConfig = "AdaptersConfig" case clientRequestToken = "ClientRequestToken" case documentLocation = "DocumentLocation" case featureTypes = "FeatureTypes" @@ -1717,6 +2365,40 @@ extension Textract { } } + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that specifies the resource to be tagged. + public let resourceARN: String + /// A set of tags (key-value pairs) that you want to assign to the resource. 
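StartDocumentAnalysis accepts the same AdaptersConfig for asynchronous jobs. A hedged sketch in which the bucket names are placeholders and the async startDocumentAnalysis variant is assumed; the returned jobId is then fed to GetDocumentAnalysis:

import SotoTextract

func startAdapterBackedAnalysis(_ textract: Textract, adapterId: String, adapterVersion: String) async throws -> String? {
    let request = Textract.StartDocumentAnalysisRequest(
        adaptersConfig: Textract.AdaptersConfig(adapters: [
            Textract.Adapter(adapterId: adapterId, pages: ["1-*"], version: adapterVersion)
        ]),
        documentLocation: Textract.DocumentLocation(
            s3Object: Textract.S3Object(bucket: "my-bucket", name: "statements/2023-10.pdf")
        ),
        featureTypes: [.queries, .tables],
        outputConfig: Textract.OutputConfig(s3Bucket: "my-output-bucket", s3Prefix: "textract")
    )
    // Poll GetDocumentAnalysis with this job ID to collect the results.
    return try await textract.startDocumentAnalysis(request).jobId
}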
+ public let tags: [String: String] + + public init(resourceARN: String, tags: [String: String]) { + self.resourceARN = resourceARN + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct UndetectedSignature: AWSDecodableShape { /// The page where a signature was expected but not found. public let page: Int? @@ -1730,6 +2412,107 @@ extension Textract { } } + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that specifies the resource to be untagged. + public let resourceARN: String + /// Specifies the tags to be removed from the resource specified by the ResourceARN. + public let tagKeys: [String] + + public init(resourceARN: String, tagKeys: [String]) { + self.resourceARN = resourceARN + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case tagKeys = "TagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateAdapterRequest: AWSEncodableShape { + /// A string containing a unique ID for the adapter that will be updated. + public let adapterId: String + /// The new name to be applied to the adapter. + public let adapterName: String? + /// The new auto-update status to be applied to the adapter. + public let autoUpdate: AutoUpdate? + /// The new description to be applied to the adapter. + public let description: String? + + public init(adapterId: String, adapterName: String? = nil, autoUpdate: AutoUpdate? = nil, description: String? 
= nil) { + self.adapterId = adapterId + self.adapterName = adapterName + self.autoUpdate = autoUpdate + self.description = description + } + + public func validate(name: String) throws { + try self.validate(self.adapterId, name: "adapterId", parent: name, max: 1011) + try self.validate(self.adapterId, name: "adapterId", parent: name, min: 12) + try self.validate(self.adapterName, name: "adapterName", parent: name, max: 128) + try self.validate(self.adapterName, name: "adapterName", parent: name, min: 1) + try self.validate(self.adapterName, name: "adapterName", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.validate(self.description, name: "description", parent: name, max: 256) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s!\"\\#\\$%'&\\(\\)\\*\\+\\,\\-\\./:;=\\?@\\[\\\\\\]\\^_`\\{\\|\\}~><]+$") + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterName = "AdapterName" + case autoUpdate = "AutoUpdate" + case description = "Description" + } + } + + public struct UpdateAdapterResponse: AWSDecodableShape { + /// A string containing a unique ID for the adapter that has been updated. + public let adapterId: String? + /// A string containing the name of the adapter that has been updated. + public let adapterName: String? + /// The auto-update status of the adapter that has been updated. + public let autoUpdate: AutoUpdate? + /// An object specifying the creation time of the adapter that has been updated. + public let creationTime: Date? + /// A string containing the description of the adapter that has been updated. + public let description: String? + /// List of the targeted feature types for the updated adapter. + public let featureTypes: [FeatureType]? + + public init(adapterId: String? = nil, adapterName: String? = nil, autoUpdate: AutoUpdate? = nil, creationTime: Date? = nil, description: String? = nil, featureTypes: [FeatureType]? = nil) { + self.adapterId = adapterId + self.adapterName = adapterName + self.autoUpdate = autoUpdate + self.creationTime = creationTime + self.description = description + self.featureTypes = featureTypes + } + + private enum CodingKeys: String, CodingKey { + case adapterId = "AdapterId" + case adapterName = "AdapterName" + case autoUpdate = "AutoUpdate" + case creationTime = "CreationTime" + case description = "Description" + case featureTypes = "FeatureTypes" + } + } + public struct Warning: AWSDecodableShape { /// The error code for the warning. public let errorCode: String?
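Tagging and UpdateAdapter round out the lifecycle. A sketch that tags an adapter by ARN, reads the tags back, and disables auto-update so only evaluated versions are served; the ARN value and the async method variants are assumptions:

import SotoTextract

func retagAndFreezeAdapter(_ textract: Textract, adapterArn: String, adapterId: String) async throws {
    _ = try await textract.tagResource(Textract.TagResourceRequest(
        resourceARN: adapterArn,
        tags: ["team": "document-processing", "stage": "production"]
    ))
    let tagged = try await textract.listTagsForResource(
        Textract.ListTagsForResourceRequest(resourceARN: adapterArn)
    )
    print("Tags now on the adapter: \(tagged.tags ?? [:])")

    // Pin the adapter so new versions are not picked up automatically.
    let updated = try await textract.updateAdapter(Textract.UpdateAdapterRequest(
        adapterId: adapterId,
        autoUpdate: .disabled,
        description: "Evaluated versions only"
    ))
    print("Auto-update is now \(updated.autoUpdate?.rawValue ?? "unknown")")
}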
@@ -1755,6 +2538,7 @@ public struct TextractErrorType: AWSErrorType { enum Code: String { case accessDeniedException = "AccessDeniedException" case badDocumentException = "BadDocumentException" + case conflictException = "ConflictException" case documentTooLargeException = "DocumentTooLargeException" case humanLoopQuotaExceededException = "HumanLoopQuotaExceededException" case idempotentParameterMismatchException = "IdempotentParameterMismatchException" @@ -1765,8 +2549,11 @@ public struct TextractErrorType: AWSErrorType { case invalidS3ObjectException = "InvalidS3ObjectException" case limitExceededException = "LimitExceededException" case provisionedThroughputExceededException = "ProvisionedThroughputExceededException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" case unsupportedDocumentException = "UnsupportedDocumentException" + case validationException = "ValidationException" } private let error: Code @@ -1791,6 +2578,8 @@ public struct TextractErrorType: AWSErrorType { public static var accessDeniedException: Self { .init(.accessDeniedException) } /// Amazon Textract isn't able to read the document. For more information on the document limits in Amazon Textract, see limits. public static var badDocumentException: Self { .init(.badDocumentException) } + /// Updating or deleting a resource can cause an inconsistent state. + public static var conflictException: Self { .init(.conflictException) } /// The document can't be processed because it's too large. The maximum document size for synchronous operations 10 MB. The maximum document size for asynchronous operations is 500 MB for PDF files. public static var documentTooLargeException: Self { .init(.documentTooLargeException) } /// Indicates you have exceeded the maximum number of active human in the loop workflows available @@ -1811,10 +2600,16 @@ public struct TextractErrorType: AWSErrorType { public static var limitExceededException: Self { .init(.limitExceededException) } /// The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract. public static var provisionedThroughputExceededException: Self { .init(.provisionedThroughputExceededException) } + /// Returned when an operation tried to access a nonexistent resource. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// Returned when a request cannot be completed as it would exceed a maximum service quota. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } /// Amazon Textract is temporarily unable to process the request. Try your call again. public static var throttlingException: Self { .init(.throttlingException) } /// The format of the input document isn't supported. Documents for operations can be in PNG, JPEG, PDF, or TIFF format. public static var unsupportedDocumentException: Self { .init(.unsupportedDocumentException) } + /// Indicates that a request was not valid. Check request for proper formatting. 
+ public static var validationException: Self { .init(.validationException) } } extension TextractErrorType: Equatable { diff --git a/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift b/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift index 502fbf621e..a7287f1b36 100644 --- a/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift +++ b/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift @@ -103,6 +103,7 @@ extension Transcribe { public enum MediaFormat: String, CustomStringConvertible, Codable, Sendable { case amr = "amr" case flac = "flac" + case m4a = "m4a" case mp3 = "mp3" case mp4 = "mp4" case ogg = "ogg" diff --git a/Sources/Soto/Services/Transfer/Transfer_shapes.swift b/Sources/Soto/Services/Transfer/Transfer_shapes.swift index 3ce3000072..6a2b47f247 100644 --- a/Sources/Soto/Services/Transfer/Transfer_shapes.swift +++ b/Sources/Soto/Services/Transfer/Transfer_shapes.swift @@ -641,9 +641,9 @@ extension Transfer { try self.validate(self.loggingRole, name: "loggingRole", parent: name, max: 2048) try self.validate(self.loggingRole, name: "loggingRole", parent: name, min: 20) try self.validate(self.loggingRole, name: "loggingRole", parent: name, pattern: "^arn:.*role/") - try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, max: 512) + try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, max: 4096) try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, pattern: "^[\\x09-\\x0D\\x20-\\x7E]*$") - try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, max: 512) + try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, max: 4096) try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, pattern: "^[\\x09-\\x0D\\x20-\\x7E]*$") try self.protocolDetails?.validate(name: "\(name).protocolDetails") try self.validate(self.protocols, name: "protocols", parent: name, max: 4) @@ -3556,7 +3556,7 @@ extension Transfer { } public struct SftpConnectorConfig: AWSEncodableShape & AWSDecodableShape { - /// The public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key. The three standard SSH public key format elements are , , and an optional , with spaces between each element. Specify only the and : do not enter the portion of the key. For the trusted host key, Transfer Family accepts RSA and ECDSA keys. For RSA keys, the key type is ssh-rsa. For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated. + /// The public portion of the host key, or keys, that are used to identify the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key. The three standard SSH public key format elements are , , and an optional , with spaces between each element. Specify only the and : do not enter the portion of the key. For the trusted host key, Transfer Family accepts RSA and ECDSA keys. For RSA keys, the string is ssh-rsa. For ECDSA keys, the string is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated. 
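The new Textract error cases above surface through TextractErrorType, which is Equatable, so they can be matched in a catch clause. A brief sketch around DeleteAdapter; the async variant is assumed:

import SotoTextract

func deleteAdapterIgnoringMissing(_ textract: Textract, adapterId: String) async throws {
    do {
        _ = try await textract.deleteAdapter(Textract.DeleteAdapterRequest(adapterId: adapterId))
    } catch let error as TextractErrorType where error == .resourceNotFoundException {
        // The adapter is already gone; nothing to do.
    } catch let error as TextractErrorType where error == .conflictException {
        print("Adapter \(adapterId) is in use or changing state, try again later: \(error)")
    } catch let error as TextractErrorType where error == .validationException {
        print("Request was not valid: \(error)")
    }
}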
public let trustedHostKeys: [String]? /// The identifier for the secret (in Amazon Web Services Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret. public let userSecretId: String? @@ -4334,9 +4334,9 @@ extension Transfer { try self.identityProviderDetails?.validate(name: "\(name).identityProviderDetails") try self.validate(self.loggingRole, name: "loggingRole", parent: name, max: 2048) try self.validate(self.loggingRole, name: "loggingRole", parent: name, pattern: "^$|arn:.*role/") - try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, max: 512) + try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, max: 4096) try self.validate(self.postAuthenticationLoginBanner, name: "postAuthenticationLoginBanner", parent: name, pattern: "^[\\x09-\\x0D\\x20-\\x7E]*$") - try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, max: 512) + try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, max: 4096) try self.validate(self.preAuthenticationLoginBanner, name: "preAuthenticationLoginBanner", parent: name, pattern: "^[\\x09-\\x0D\\x20-\\x7E]*$") try self.protocolDetails?.validate(name: "\(name).protocolDetails") try self.validate(self.protocols, name: "protocols", parent: name, max: 4) diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api+async.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api+async.swift index cdbc1758bd..649b646d7d 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api+async.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api+async.swift @@ -31,6 +31,11 @@ extension WorkSpaces { return try await self.client.execute(operation: "AssociateIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Associates the specified application to the specified WorkSpace. + public func associateWorkspaceApplication(_ input: AssociateWorkspaceApplicationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AssociateWorkspaceApplicationResult { + return try await self.client.execute(operation: "AssociateWorkspaceApplication", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Adds one or more rules to the specified IP access control group. This action gives users permission to access their WorkSpaces from the CIDR address ranges specified in the rules. public func authorizeIpRules(_ input: AuthorizeIpRulesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> AuthorizeIpRulesResult { return try await self.client.execute(operation: "AuthorizeIpRules", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -81,7 +86,7 @@ extension WorkSpaces { return try await self.client.execute(operation: "CreateWorkspaceImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. 
For more information, see Amazon WorkSpaces Core. + /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. PCoIP is only available for Windows bundles. public func createWorkspaces(_ input: CreateWorkspacesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> CreateWorkspacesResult { return try await self.client.execute(operation: "CreateWorkspaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -121,6 +126,11 @@ extension WorkSpaces { return try await self.client.execute(operation: "DeleteWorkspaceImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Deploys associated applications to the specified WorkSpace + public func deployWorkspaceApplications(_ input: DeployWorkspaceApplicationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeployWorkspaceApplicationsResult { + return try await self.client.execute(operation: "DeployWorkspaceApplications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory. Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms. To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again. public func deregisterWorkspaceDirectory(_ input: DeregisterWorkspaceDirectoryRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DeregisterWorkspaceDirectoryResult { return try await self.client.execute(operation: "DeregisterWorkspaceDirectory", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -136,6 +146,21 @@ extension WorkSpaces { return try await self.client.execute(operation: "DescribeAccountModifications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between the application and the specified associated resources. + public func describeApplicationAssociations(_ input: DescribeApplicationAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeApplicationAssociationsResult { + return try await self.client.execute(operation: "DescribeApplicationAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Describes the specified applications by filtering based on their compute types, license availability, operating systems, and owners. 
+ public func describeApplications(_ input: DescribeApplicationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeApplicationsResult { + return try await self.client.execute(operation: "DescribeApplications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Describes the associations between the applications and the specified bundle. + public func describeBundleAssociations(_ input: DescribeBundleAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeBundleAssociationsResult { + return try await self.client.execute(operation: "DescribeBundleAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Describes the specified client branding. Client branding allows you to customize the login page of various device types for your users. You can add your company logo, the support email address, support link, link to reset password, and a custom message for users trying to sign in. Only device types that have branding information configured will be shown in the response. public func describeClientBranding(_ input: DescribeClientBrandingRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeClientBrandingResult { return try await self.client.execute(operation: "DescribeClientBranding", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -161,6 +186,11 @@ extension WorkSpaces { return try await self.client.execute(operation: "DescribeConnectionAliases", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between the applications and the specified image. + public func describeImageAssociations(_ input: DescribeImageAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeImageAssociationsResult { + return try await self.client.execute(operation: "DescribeImageAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Describes one or more of your IP access control groups. public func describeIpGroups(_ input: DescribeIpGroupsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeIpGroupsResult { return try await self.client.execute(operation: "DescribeIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -171,6 +201,11 @@ extension WorkSpaces { return try await self.client.execute(operation: "DescribeTags", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between applications and the specified WorkSpace. + public func describeWorkspaceAssociations(_ input: DescribeWorkspaceAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeWorkspaceAssociationsResult { + return try await self.client.execute(operation: "DescribeWorkspaceAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Retrieves a list that describes the available WorkSpace bundles.
You can filter the results using either bundle ID or owner, but not both. public func describeWorkspaceBundles(_ input: DescribeWorkspaceBundlesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DescribeWorkspaceBundlesResult { return try await self.client.execute(operation: "DescribeWorkspaceBundles", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -216,6 +251,11 @@ extension WorkSpaces { return try await self.client.execute(operation: "DisassociateIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Disassociates the specified application from a WorkSpace. + public func disassociateWorkspaceApplication(_ input: DisassociateWorkspaceApplicationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> DisassociateWorkspaceApplicationResult { + return try await self.client.execute(operation: "DisassociateWorkspaceApplication", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Imports client branding. Client branding allows you to customize your WorkSpace's client login portal. You can tailor your login portal company logo, the support email address, support link, link to reset password, and a custom message for users trying to sign in. After you import client branding, the default branding experience for the specified platform type is replaced with the imported experience. You must specify at least one platform type when importing client branding. You can import up to 6 MB of data with each request. If your request exceeds this limit, you can import client branding for different platform types using separate requests. In each platform type, the SupportEmail and SupportLink parameters are mutually exclusive. You can specify only one parameter for each platform type, but not both. Imported data can take up to a minute to appear in the WorkSpaces client. public func importClientBranding(_ input: ImportClientBrandingRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) async throws -> ImportClientBrandingResult { return try await self.client.execute(operation: "ImportClientBranding", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -351,6 +391,50 @@ @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension WorkSpaces { + /// Describes the associations between the application and the specified associated resources. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + public func describeApplicationAssociationsPaginator( + _ input: DescribeApplicationAssociationsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeApplicationAssociations, + inputKey: \DescribeApplicationAssociationsRequest.nextToken, + outputKey: \DescribeApplicationAssociationsResult.nextToken, + logger: logger, + on: eventLoop + ) + } + + /// Describes the specified applications by filtering based on their compute types, license availability, operating systems, and owners. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + public func describeApplicationsPaginator( + _ input: DescribeApplicationsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeApplications, + inputKey: \DescribeApplicationsRequest.nextToken, + outputKey: \DescribeApplicationsResult.nextToken, + logger: logger, + on: eventLoop + ) + } + /// Retrieves a list that describes the available WorkSpace bundles. You can filter the results using either bundle ID or owner, but not both. /// Return PaginatorSequence for operation. /// diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift index fb49ed9049..8b20689204 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift @@ -83,6 +83,11 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "AssociateIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Associates the specified application to the specified WorkSpace. + public func associateWorkspaceApplication(_ input: AssociateWorkspaceApplicationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "AssociateWorkspaceApplication", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Adds one or more rules to the specified IP access control group. This action gives users permission to access their WorkSpaces from the CIDR address ranges specified in the rules. public func authorizeIpRules(_ input: AuthorizeIpRulesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "AuthorizeIpRules", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -133,7 +138,7 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "CreateWorkspaceImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } - /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. + /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. PCoIP is only available for Windows bundles. public func createWorkspaces(_ input: CreateWorkspacesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop?
= nil) -> EventLoopFuture { return self.client.execute(operation: "CreateWorkspaces", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } @@ -173,6 +178,11 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "DeleteWorkspaceImage", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Deploys associated applications to the specified WorkSpace + public func deployWorkspaceApplications(_ input: DeployWorkspaceApplicationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DeployWorkspaceApplications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory. Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms. To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again. public func deregisterWorkspaceDirectory(_ input: DeregisterWorkspaceDirectoryRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DeregisterWorkspaceDirectory", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -188,6 +198,21 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "DescribeAccountModifications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between the application and the specified associated resources. + public func describeApplicationAssociations(_ input: DescribeApplicationAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DescribeApplicationAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Describes the specified applications by filtering based on their compute types, license availability, operating systems, and owners. + public func describeApplications(_ input: DescribeApplicationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DescribeApplications", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + + /// Describes the associations between the applications and the specified bundle. + public func describeBundleAssociations(_ input: DescribeBundleAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? 
= nil) -> EventLoopFuture { + return self.client.execute(operation: "DescribeBundleAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Describes the specified client branding. Client branding allows you to customize the login page of various device types for your users. You can add your company logo, the support email address, support link, link to reset password, and a custom message for users trying to sign in. Only device types that have branding information configured will be shown in the response. public func describeClientBranding(_ input: DescribeClientBrandingRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeClientBranding", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -213,6 +238,11 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "DescribeConnectionAliases", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between the applications and the specified image. + public func describeImageAssociations(_ input: DescribeImageAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DescribeImageAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Describes one or more of your IP access control groups. public func describeIpGroups(_ input: DescribeIpGroupsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -223,6 +253,11 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "DescribeTags", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Describes the associations between applications and the specified WorkSpace. + public func describeWorkspaceAssociations(_ input: DescribeWorkspaceAssociationsRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DescribeWorkspaceAssociations", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Retrieves a list that describes the available WorkSpace bundles. You can filter the results using either bundle ID or owner, but not both. public func describeWorkspaceBundles(_ input: DescribeWorkspaceBundlesRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "DescribeWorkspaceBundles", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -268,6 +303,11 @@ public struct WorkSpaces: AWSService { return self.client.execute(operation: "DisassociateIpGroups", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) } + /// Disassociates the specified application from a WorkSpace.
+ public func disassociateWorkspaceApplication(_ input: DisassociateWorkspaceApplicationRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { + return self.client.execute(operation: "DisassociateWorkspaceApplication", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) + } + /// Imports client branding. Client branding allows you to customize your WorkSpace's client login portal. You can tailor your login portal company logo, the support email address, support link, link to reset password, and a custom message for users trying to sign in. After you import client branding, the default branding experience for the specified platform type is replaced with the imported experience. You must specify at least one platform type when importing client branding. You can import up to 6 MB of data with each request. If your request exceeds this limit, you can import client branding for different platform types using separate requests. In each platform type, the SupportEmail and SupportLink parameters are mutually exclusive. You can specify only one parameter for each platform type, but not both. Imported data can take up to a minute to appear in the WorkSpaces client. public func importClientBranding(_ input: ImportClientBrandingRequest, logger: Logger = AWSClient.loggingDisabled, on eventLoop: EventLoop? = nil) -> EventLoopFuture { return self.client.execute(operation: "ImportClientBranding", path: "/", httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger, on: eventLoop) @@ -411,6 +451,112 @@ extension WorkSpaces { // MARK: Paginators extension WorkSpaces { + /// Describes the associations between the application and the specified associated resources. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func describeApplicationAssociationsPaginator( + _ input: DescribeApplicationAssociationsRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, DescribeApplicationAssociationsResult, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.describeApplicationAssociations, + inputKey: \DescribeApplicationAssociationsRequest.nextToken, + outputKey: \DescribeApplicationAssociationsResult.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue.
+ public func describeApplicationAssociationsPaginator( + _ input: DescribeApplicationAssociationsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (DescribeApplicationAssociationsResult, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.describeApplicationAssociations, + inputKey: \DescribeApplicationAssociationsRequest.nextToken, + outputKey: \DescribeApplicationAssociationsResult.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Describes the specified applications by filtering based on their compute types, license availability, operating systems, and owners. + /// + /// Provide paginated results to closure `onPage` for it to combine them into one result. + /// This works in a similar manner to `Array.reduce(_:_:) -> Result`. + /// + /// Parameters: + /// - input: Input for request + /// - initialValue: The value to use as the initial accumulating value. `initialValue` is passed to `onPage` the first time it is called. + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each paginated response. It combines an accumulating result with the contents of response. This combined result is then returned + /// along with a boolean indicating if the paginate operation should continue. + public func describeApplicationsPaginator( + _ input: DescribeApplicationsRequest, + _ initialValue: Result, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (Result, DescribeApplicationsResult, EventLoop) -> EventLoopFuture<(Bool, Result)> + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + initialValue: initialValue, + command: self.describeApplications, + inputKey: \DescribeApplicationsRequest.nextToken, + outputKey: \DescribeApplicationsResult.nextToken, + on: eventLoop, + onPage: onPage + ) + } + + /// Provide paginated results to closure `onPage`. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + /// - eventLoop: EventLoop to run this process on + /// - onPage: closure called with each block of entries. Returns boolean indicating whether we should continue. + public func describeApplicationsPaginator( + _ input: DescribeApplicationsRequest, + logger: Logger = AWSClient.loggingDisabled, + on eventLoop: EventLoop? = nil, + onPage: @escaping (DescribeApplicationsResult, EventLoop) -> EventLoopFuture + ) -> EventLoopFuture { + return self.client.paginate( + input: input, + command: self.describeApplications, + inputKey: \DescribeApplicationsRequest.nextToken, + outputKey: \DescribeApplicationsResult.nextToken, + on: eventLoop, + onPage: onPage + ) + } + /// Retrieves a list that describes the available WorkSpace bundles. You can filter the results using either bundle ID or owner, but not both. /// /// Provide paginated results to closure `onPage` for it to combine them into one result.
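// --- Usage sketch (not part of the generated diff above) ---
// A minimal, hedged example of how the new EventLoopFuture-based
// describeApplicationsPaginator might be consumed. The `workSpaces` service
// object, the region, and the filter values are illustrative assumptions;
// only the paginator signature shown in the diff is relied upon.
import NIOCore
import SotoWorkSpaces

func collectLicensedWindowsApplications(
    _ workSpaces: WorkSpaces
) -> EventLoopFuture<[WorkSpaces.WorkSpaceApplication]> {
    // Filter to licensed applications that support Windows 10/11.
    let request = WorkSpaces.DescribeApplicationsRequest(
        licenseType: .licensed,
        maxResults: 25,
        operatingSystemNames: [.windows10, .windows11]
    )
    // The reduce-style paginator feeds each page to the closure together with
    // the accumulated value; returning `true` continues pagination until
    // NextToken is exhausted.
    return workSpaces.describeApplicationsPaginator(
        request,
        [WorkSpaces.WorkSpaceApplication]()
    ) { accumulated, page, eventLoop in
        eventLoop.makeSucceededFuture((true, accumulated + (page.applications ?? [])))
    }
}
// --- End usage sketch ---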
@@ -571,6 +717,31 @@ extension WorkSpaces { } } +extension WorkSpaces.DescribeApplicationAssociationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> WorkSpaces.DescribeApplicationAssociationsRequest { + return .init( + applicationId: self.applicationId, + associatedResourceTypes: self.associatedResourceTypes, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension WorkSpaces.DescribeApplicationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> WorkSpaces.DescribeApplicationsRequest { + return .init( + applicationIds: self.applicationIds, + computeTypeNames: self.computeTypeNames, + licenseType: self.licenseType, + maxResults: self.maxResults, + nextToken: token, + operatingSystemNames: self.operatingSystemNames, + owner: self.owner + ) + } +} + extension WorkSpaces.DescribeWorkspaceBundlesRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> WorkSpaces.DescribeWorkspaceBundlesRequest { return .init( diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index 1cc9d50a11..a1ad26da31 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -38,6 +38,35 @@ extension WorkSpaces { public var description: String { return self.rawValue } } + public enum ApplicationAssociatedResourceType: String, CustomStringConvertible, Codable, Sendable { + case bundle = "BUNDLE" + case image = "IMAGE" + case workspace = "WORKSPACE" + public var description: String { return self.rawValue } + } + + public enum AssociationErrorCode: String, CustomStringConvertible, Codable, Sendable { + case insufficientDiskspace = "ValidationError.InsufficientDiskSpace" + case insufficientMemory = "ValidationError.InsufficientMemory" + case internalServerError = "DeploymentError.InternalServerError" + case unsupportedOperatingSystem = "ValidationError.UnsupportedOperatingSystem" + case workspaceUnreachable = "DeploymentError.WorkspaceUnreachable" + public var description: String { return self.rawValue } + } + + public enum AssociationState: String, CustomStringConvertible, Codable, Sendable { + case completed = "COMPLETED" + case error = "ERROR" + case installing = "INSTALLING" + case pendingInstall = "PENDING_INSTALL" + case pendingInstallDeployment = "PENDING_INSTALL_DEPLOYMENT" + case pendingUninstall = "PENDING_UNINSTALL" + case pendingUninstallDeployment = "PENDING_UNINSTALL_DEPLOYMENT" + case removed = "REMOVED" + case uninstalling = "UNINSTALLING" + public var description: String { return self.rawValue } + } + public enum AssociationStatus: String, CustomStringConvertible, Codable, Sendable { case associatedWithOwnerAccount = "ASSOCIATED_WITH_OWNER_ACCOUNT" case associatedWithSharedAccount = "ASSOCIATED_WITH_SHARED_ACCOUNT" @@ -47,6 +76,11 @@ extension WorkSpaces { public var description: String { return self.rawValue } } + public enum BundleAssociatedResourceType: String, CustomStringConvertible, Codable, Sendable { + case application = "APPLICATION" + public var description: String { return self.rawValue } + } + public enum BundleType: String, CustomStringConvertible, Codable, Sendable { case regular = "REGULAR" case standby = "STANDBY" @@ -125,6 +159,11 @@ extension WorkSpaces { public var description: String { return self.rawValue } } + public enum ImageAssociatedResourceType: String, CustomStringConvertible, Codable, Sendable { + case application = 
"APPLICATION" + public var description: String { return self.rawValue } + } + public enum ImageType: String, CustomStringConvertible, Codable, Sendable { case owned = "OWNED" case shared = "SHARED" @@ -150,6 +189,21 @@ extension WorkSpaces { public var description: String { return self.rawValue } } + public enum OperatingSystemName: String, CustomStringConvertible, Codable, Sendable { + case amazonLinux2 = "AMAZON_LINUX_2" + case ubuntu1804 = "UBUNTU_18_04" + case ubuntu2004 = "UBUNTU_20_04" + case ubuntu2204 = "UBUNTU_22_04" + case unknown = "UNKNOWN" + case windows10 = "WINDOWS_10" + case windows11 = "WINDOWS_11" + case windows7 = "WINDOWS_7" + case windowsServer2016 = "WINDOWS_SERVER_2016" + case windowsServer2019 = "WINDOWS_SERVER_2019" + case windowsServer2022 = "WINDOWS_SERVER_2022" + public var description: String { return self.rawValue } + } + public enum OperatingSystemType: String, CustomStringConvertible, Codable, Sendable { case linux = "LINUX" case windows = "WINDOWS" @@ -194,6 +248,25 @@ extension WorkSpaces { public var description: String { return self.rawValue } } + public enum WorkSpaceApplicationLicenseType: String, CustomStringConvertible, Codable, Sendable { + case licensed = "LICENSED" + case unlicensed = "UNLICENSED" + public var description: String { return self.rawValue } + } + + public enum WorkSpaceApplicationState: String, CustomStringConvertible, Codable, Sendable { + case available = "AVAILABLE" + case error = "ERROR" + case pending = "PENDING" + case uninstallOnly = "UNINSTALL_ONLY" + public var description: String { return self.rawValue } + } + + public enum WorkSpaceAssociatedResourceType: String, CustomStringConvertible, Codable, Sendable { + case application = "APPLICATION" + public var description: String { return self.rawValue } + } + public enum WorkspaceBundleState: String, CustomStringConvertible, Codable, Sendable { case available = "AVAILABLE" case error = "ERROR" @@ -332,6 +405,43 @@ extension WorkSpaces { } } + public struct ApplicationResourceAssociation: AWSDecodableShape { + /// The identifier of the application. + public let applicationId: String? + /// The identifier of the associated resource. + public let associatedResourceId: String? + /// The resource type of the associated resource. + public let associatedResourceType: ApplicationAssociatedResourceType? + /// The time the association was created. + public let created: Date? + /// The time the association status was last updated. + public let lastUpdatedTime: Date? + /// The status of the application resource association. + public let state: AssociationState? + /// The reason the association deployment failed. + public let stateReason: AssociationStateReason? + + public init(applicationId: String? = nil, associatedResourceId: String? = nil, associatedResourceType: ApplicationAssociatedResourceType? = nil, created: Date? = nil, lastUpdatedTime: Date? = nil, state: AssociationState? = nil, stateReason: AssociationStateReason? 
= nil) { + self.applicationId = applicationId + self.associatedResourceId = associatedResourceId + self.associatedResourceType = associatedResourceType + self.created = created + self.lastUpdatedTime = lastUpdatedTime + self.state = state + self.stateReason = stateReason + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case associatedResourceId = "AssociatedResourceId" + case associatedResourceType = "AssociatedResourceType" + case created = "Created" + case lastUpdatedTime = "LastUpdatedTime" + case state = "State" + case stateReason = "StateReason" + } + } + public struct AssociateConnectionAliasRequest: AWSEncodableShape { /// The identifier of the connection alias. public let aliasId: String @@ -399,6 +509,58 @@ extension WorkSpaces { public init() {} } + public struct AssociateWorkspaceApplicationRequest: AWSEncodableShape { + /// The identifier of the application. + public let applicationId: String + /// The identifier of the WorkSpace. + public let workspaceId: String + + public init(applicationId: String, workspaceId: String) { + self.applicationId = applicationId + self.workspaceId = workspaceId + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^wsa-[0-9a-z]{8,63}$") + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^ws-[0-9a-z]{8,63}$") + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case workspaceId = "WorkspaceId" + } + } + + public struct AssociateWorkspaceApplicationResult: AWSDecodableShape { + /// Information about the association between the specified WorkSpace and the specified application. + public let association: WorkspaceResourceAssociation? + + public init(association: WorkspaceResourceAssociation? = nil) { + self.association = association + } + + private enum CodingKeys: String, CodingKey { + case association = "Association" + } + } + + public struct AssociationStateReason: AWSDecodableShape { + /// The error code of the association deployment failure. + public let errorCode: AssociationErrorCode? + /// The error message of the association deployment failure. + public let errorMessage: String? + + public init(errorCode: AssociationErrorCode? = nil, errorMessage: String? = nil) { + self.errorCode = errorCode + self.errorMessage = errorMessage + } + + private enum CodingKeys: String, CodingKey { + case errorCode = "ErrorCode" + case errorMessage = "ErrorMessage" + } + } + public struct AuthorizeIpRulesRequest: AWSEncodableShape { /// The identifier of the group. public let groupId: String @@ -424,6 +586,43 @@ extension WorkSpaces { public init() {} } + public struct BundleResourceAssociation: AWSDecodableShape { + /// The identifier of the associated resource. + public let associatedResourceId: String? + /// The resource type of the associated resources. + public let associatedResourceType: BundleAssociatedResourceType? + /// The identifier of the bundle. + public let bundleId: String? + /// The time the association is created. + public let created: Date? + /// The time the association status was last updated. + public let lastUpdatedTime: Date? + /// The status of the bundle resource association. + public let state: AssociationState? + /// The reason the association deployment failed. + public let stateReason: AssociationStateReason? + + public init(associatedResourceId: String? = nil, associatedResourceType: BundleAssociatedResourceType? 
= nil, bundleId: String? = nil, created: Date? = nil, lastUpdatedTime: Date? = nil, state: AssociationState? = nil, stateReason: AssociationStateReason? = nil) { + self.associatedResourceId = associatedResourceId + self.associatedResourceType = associatedResourceType + self.bundleId = bundleId + self.created = created + self.lastUpdatedTime = lastUpdatedTime + self.state = state + self.stateReason = stateReason + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceId = "AssociatedResourceId" + case associatedResourceType = "AssociatedResourceType" + case bundleId = "BundleId" + case created = "Created" + case lastUpdatedTime = "LastUpdatedTime" + case state = "State" + case stateReason = "StateReason" + } + } + public struct CertificateBasedAuthProperties: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of the Amazon Web Services Certificate Manager Private CA resource. public let certificateAuthorityArn: String? @@ -1366,6 +1565,40 @@ extension WorkSpaces { public init() {} } + public struct DeployWorkspaceApplicationsRequest: AWSEncodableShape { + /// Indicates whether the force flag is applied for the specified WorkSpace. When the force flag is enabled, it allows previously failed deployments to be retried. + public let force: Bool? + /// The identifier of the WorkSpace. + public let workspaceId: String + + public init(force: Bool? = nil, workspaceId: String) { + self.force = force + self.workspaceId = workspaceId + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^ws-[0-9a-z]{8,63}$") + } + + private enum CodingKeys: String, CodingKey { + case force = "Force" + case workspaceId = "WorkspaceId" + } + } + + public struct DeployWorkspaceApplicationsResult: AWSDecodableShape { + /// The list of deployed associations and information about them. + public let deployment: WorkSpaceApplicationDeployment? + + public init(deployment: WorkSpaceApplicationDeployment? = nil) { + self.deployment = deployment + } + + private enum CodingKeys: String, CodingKey { + case deployment = "Deployment" + } + } + public struct DeregisterWorkspaceDirectoryRequest: AWSEncodableShape { /// The identifier of the directory. If any WorkSpaces are registered to this directory, you must remove them before you deregister the directory, or you will receive an OperationNotSupportedException error. public let directoryId: String @@ -1445,6 +1678,157 @@ extension WorkSpaces { } } + public struct DescribeApplicationAssociationsRequest: AWSEncodableShape { + /// The identifier of the specified application. + public let applicationId: String + /// The resource type of the associated resources. + public let associatedResourceTypes: [ApplicationAssociatedResourceType] + /// The maximum number of associations to return. + public let maxResults: Int? + /// If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. + public let nextToken: String? + + public init(applicationId: String, associatedResourceTypes: [ApplicationAssociatedResourceType], maxResults: Int? = nil, nextToken: String? 
= nil) { + self.applicationId = applicationId + self.associatedResourceTypes = associatedResourceTypes + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^wsa-[0-9a-z]{8,63}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case associatedResourceTypes = "AssociatedResourceTypes" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct DescribeApplicationAssociationsResult: AWSDecodableShape { + /// List of associations and information about them. + public let associations: [ApplicationResourceAssociation]? + /// If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. + public let nextToken: String? + + public init(associations: [ApplicationResourceAssociation]? = nil, nextToken: String? = nil) { + self.associations = associations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case associations = "Associations" + case nextToken = "NextToken" + } + } + + public struct DescribeApplicationsRequest: AWSEncodableShape { + /// The identifiers of one or more applications. + public let applicationIds: [String]? + /// The compute types supported by the applications. + public let computeTypeNames: [Compute]? + /// The license availability for the applications. + public let licenseType: WorkSpaceApplicationLicenseType? + /// The maximum number of applications to return. + public let maxResults: Int? + /// If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. + public let nextToken: String? + /// The operating systems supported by the applications. + public let operatingSystemNames: [OperatingSystemName]? + /// The owner of the applications. + public let owner: String? + + public init(applicationIds: [String]? = nil, computeTypeNames: [Compute]? = nil, licenseType: WorkSpaceApplicationLicenseType? = nil, maxResults: Int? = nil, nextToken: String? = nil, operatingSystemNames: [OperatingSystemName]? = nil, owner: String? 
= nil) { + self.applicationIds = applicationIds + self.computeTypeNames = computeTypeNames + self.licenseType = licenseType + self.maxResults = maxResults + self.nextToken = nextToken + self.operatingSystemNames = operatingSystemNames + self.owner = owner + } + + public func validate(name: String) throws { + try self.applicationIds?.forEach { + try validate($0, name: "applicationIds[]", parent: name, pattern: "^wsa-[0-9a-z]{8,63}$") + } + try self.validate(self.applicationIds, name: "applicationIds", parent: name, max: 25) + try self.validate(self.applicationIds, name: "applicationIds", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.owner, name: "owner", parent: name, pattern: "^\\d{12}|AMAZON$") + } + + private enum CodingKeys: String, CodingKey { + case applicationIds = "ApplicationIds" + case computeTypeNames = "ComputeTypeNames" + case licenseType = "LicenseType" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case operatingSystemNames = "OperatingSystemNames" + case owner = "Owner" + } + } + + public struct DescribeApplicationsResult: AWSDecodableShape { + /// List of information about the specified applications. + public let applications: [WorkSpaceApplication]? + /// If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. + public let nextToken: String? + + public init(applications: [WorkSpaceApplication]? = nil, nextToken: String? = nil) { + self.applications = applications + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case applications = "Applications" + case nextToken = "NextToken" + } + } + + public struct DescribeBundleAssociationsRequest: AWSEncodableShape { + /// The resource types of the associated resource. + public let associatedResourceTypes: [BundleAssociatedResourceType] + /// The identifier of the bundle. + public let bundleId: String + + public init(associatedResourceTypes: [BundleAssociatedResourceType], bundleId: String) { + self.associatedResourceTypes = associatedResourceTypes + self.bundleId = bundleId + } + + public func validate(name: String) throws { + try self.validate(self.bundleId, name: "bundleId", parent: name, pattern: "^wsb-[0-9a-z]{8,63}$") + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceTypes = "AssociatedResourceTypes" + case bundleId = "BundleId" + } + } + + public struct DescribeBundleAssociationsResult: AWSDecodableShape { + /// List of information about the specified associations. + public let associations: [BundleResourceAssociation]? + + public init(associations: [BundleResourceAssociation]? = nil) { + self.associations = associations + } + + private enum CodingKeys: String, CodingKey { + case associations = "Associations" + } + } + public struct DescribeClientBrandingRequest: AWSEncodableShape { /// The directory identifier of the WorkSpace for which you want to view client branding information. public let resourceId: String @@ -1688,6 +2072,40 @@ extension WorkSpaces { } } + public struct DescribeImageAssociationsRequest: AWSEncodableShape { + /// The resource types of the associated resource. 
+ public let associatedResourceTypes: [ImageAssociatedResourceType] + /// The identifier of the image. + public let imageId: String + + public init(associatedResourceTypes: [ImageAssociatedResourceType], imageId: String) { + self.associatedResourceTypes = associatedResourceTypes + self.imageId = imageId + } + + public func validate(name: String) throws { + try self.validate(self.imageId, name: "imageId", parent: name, pattern: "^wsi-[0-9a-z]{9,63}$") + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceTypes = "AssociatedResourceTypes" + case imageId = "ImageId" + } + } + + public struct DescribeImageAssociationsResult: AWSDecodableShape { + /// List of information about the specified associations. + public let associations: [ImageResourceAssociation]? + + public init(associations: [ImageResourceAssociation]? = nil) { + self.associations = associations + } + + private enum CodingKeys: String, CodingKey { + case associations = "Associations" + } + } + public struct DescribeIpGroupsRequest: AWSEncodableShape { /// The identifiers of one or more IP access control groups. public let groupIds: [String]? @@ -1766,6 +2184,40 @@ extension WorkSpaces { } } + public struct DescribeWorkspaceAssociationsRequest: AWSEncodableShape { + /// The resource types of the associated resources. + public let associatedResourceTypes: [WorkSpaceAssociatedResourceType] + /// The identifier of the WorkSpace. + public let workspaceId: String + + public init(associatedResourceTypes: [WorkSpaceAssociatedResourceType], workspaceId: String) { + self.associatedResourceTypes = associatedResourceTypes + self.workspaceId = workspaceId + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^ws-[0-9a-z]{8,63}$") + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceTypes = "AssociatedResourceTypes" + case workspaceId = "WorkspaceId" + } + } + + public struct DescribeWorkspaceAssociationsResult: AWSDecodableShape { + /// List of information about the specified associations. + public let associations: [WorkspaceResourceAssociation]? + + public init(associations: [WorkspaceResourceAssociation]? = nil) { + self.associations = associations + } + + private enum CodingKeys: String, CodingKey { + case associations = "Associations" + } + } + public struct DescribeWorkspaceBundlesRequest: AWSEncodableShape { /// The identifiers of the bundles. You cannot combine this parameter with any other filter. public let bundleIds: [String]? @@ -2169,6 +2621,41 @@ extension WorkSpaces { public init() {} } + public struct DisassociateWorkspaceApplicationRequest: AWSEncodableShape { + /// The identifier of the application. + public let applicationId: String + /// The identifier of the WorkSpace. + public let workspaceId: String + + public init(applicationId: String, workspaceId: String) { + self.applicationId = applicationId + self.workspaceId = workspaceId + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^wsa-[0-9a-z]{8,63}$") + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^ws-[0-9a-z]{8,63}$") + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case workspaceId = "WorkspaceId" + } + } + + public struct DisassociateWorkspaceApplicationResult: AWSDecodableShape { + /// Information about the targeted association. 
+ public let association: WorkspaceResourceAssociation? + + public init(association: WorkspaceResourceAssociation? = nil) { + self.association = association + } + + private enum CodingKeys: String, CodingKey { + case association = "Association" + } + } + public struct ErrorDetails: AWSDecodableShape { /// Indicates the error code returned. public let errorCode: WorkspaceImageErrorDetailCode? @@ -2262,6 +2749,43 @@ extension WorkSpaces { } } + public struct ImageResourceAssociation: AWSDecodableShape { + /// The identifier of the associated resource. + public let associatedResourceId: String? + /// The resource type of the associated resources. + public let associatedResourceType: ImageAssociatedResourceType? + /// The time the association is created. + public let created: Date? + /// The identifier of the image. + public let imageId: String? + /// The time the association status was last updated. + public let lastUpdatedTime: Date? + /// The status of the image resource association. + public let state: AssociationState? + /// The reason the association deployment failed. + public let stateReason: AssociationStateReason? + + public init(associatedResourceId: String? = nil, associatedResourceType: ImageAssociatedResourceType? = nil, created: Date? = nil, imageId: String? = nil, lastUpdatedTime: Date? = nil, state: AssociationState? = nil, stateReason: AssociationStateReason? = nil) { + self.associatedResourceId = associatedResourceId + self.associatedResourceType = associatedResourceType + self.created = created + self.imageId = imageId + self.lastUpdatedTime = lastUpdatedTime + self.state = state + self.stateReason = stateReason + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceId = "AssociatedResourceId" + case associatedResourceType = "AssociatedResourceType" + case created = "Created" + case imageId = "ImageId" + case lastUpdatedTime = "LastUpdatedTime" + case state = "State" + case stateReason = "StateReason" + } + } + public struct ImportClientBrandingRequest: AWSEncodableShape { /// The branding information to import for Android devices. public let deviceTypeAndroid: DefaultImportClientBrandingAttributes? @@ -3617,6 +4141,64 @@ extension WorkSpaces { } } + public struct WorkSpaceApplication: AWSDecodableShape { + /// The identifier of the application. + public let applicationId: String? + /// The time the application is created. + public let created: Date? + /// The description of the WorkSpace application. + public let description: String? + /// The license availability for the applications. + public let licenseType: WorkSpaceApplicationLicenseType? + /// The name of the WorkSpace application. + public let name: String? + /// The owner of the WorkSpace application. + public let owner: String? + /// The status of WorkSpace application. + public let state: WorkSpaceApplicationState? + /// The supported compute types of the WorkSpace application. + public let supportedComputeTypeNames: [Compute]? + /// The supported operating systems of the WorkSpace application. + public let supportedOperatingSystemNames: [OperatingSystemName]? + + public init(applicationId: String? = nil, created: Date? = nil, description: String? = nil, licenseType: WorkSpaceApplicationLicenseType? = nil, name: String? = nil, owner: String? = nil, state: WorkSpaceApplicationState? = nil, supportedComputeTypeNames: [Compute]? = nil, supportedOperatingSystemNames: [OperatingSystemName]? 
= nil) { + self.applicationId = applicationId + self.created = created + self.description = description + self.licenseType = licenseType + self.name = name + self.owner = owner + self.state = state + self.supportedComputeTypeNames = supportedComputeTypeNames + self.supportedOperatingSystemNames = supportedOperatingSystemNames + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case created = "Created" + case description = "Description" + case licenseType = "LicenseType" + case name = "Name" + case owner = "Owner" + case state = "State" + case supportedComputeTypeNames = "SupportedComputeTypeNames" + case supportedOperatingSystemNames = "SupportedOperatingSystemNames" + } + } + + public struct WorkSpaceApplicationDeployment: AWSDecodableShape { + /// The associations between the applications and the associated resources. + public let associations: [WorkspaceResourceAssociation]? + + public init(associations: [WorkspaceResourceAssociation]? = nil) { + self.associations = associations + } + + private enum CodingKeys: String, CodingKey { + case associations = "Associations" + } + } + public struct Workspace: AWSDecodableShape { /// The identifier of the bundle used to create the WorkSpace. public let bundleId: String? @@ -3940,7 +4522,7 @@ extension WorkSpaces { public let description: String? /// The error code that is returned for the image. public let errorCode: String? - /// The details of the error returned for the image. + /// Additional details of the error returned for the image, including the possible causes of the errors and troubleshooting information. public let errorDetails: [ErrorDetails]? /// The text of the error message that is returned for the image. public let errorMessage: String? @@ -3993,6 +4575,8 @@ extension WorkSpaces { public struct WorkspaceProperties: AWSEncodableShape & AWSDecodableShape { /// The compute type. For more information, see Amazon WorkSpaces Bundles. public let computeTypeName: Compute? + /// The name of the operating system. + public let operatingSystemName: OperatingSystemName? /// The protocol. For more information, see Protocols for Amazon WorkSpaces. Only available for WorkSpaces created with PCoIP bundles. The Protocols property is case sensitive. Ensure you use PCOIP or WSP. Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn). public let protocols: [`Protocol`]? /// The size of the root volume. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace. @@ -4004,8 +4588,9 @@ extension WorkSpaces { /// The size of the user storage. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace. public let userVolumeSizeGib: Int? - public init(computeTypeName: Compute? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? = nil) { + public init(computeTypeName: Compute? = nil, operatingSystemName: OperatingSystemName? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? 
= nil) { self.computeTypeName = computeTypeName + self.operatingSystemName = operatingSystemName self.protocols = protocols self.rootVolumeSizeGib = rootVolumeSizeGib self.runningMode = runningMode @@ -4015,6 +4600,7 @@ extension WorkSpaces { private enum CodingKeys: String, CodingKey { case computeTypeName = "ComputeTypeName" + case operatingSystemName = "OperatingSystemName" case protocols = "Protocols" case rootVolumeSizeGib = "RootVolumeSizeGib" case runningMode = "RunningMode" @@ -4076,6 +4662,43 @@ extension WorkSpaces { } } + public struct WorkspaceResourceAssociation: AWSDecodableShape { + /// The identifier of the associated resource. + public let associatedResourceId: String? + /// The resource types of the associated resource. + public let associatedResourceType: WorkSpaceAssociatedResourceType? + /// The time the association is created. + public let created: Date? + /// The time the association status was last updated. + public let lastUpdatedTime: Date? + /// The status of the WorkSpace resource association. + public let state: AssociationState? + /// The reason the association deployment failed. + public let stateReason: AssociationStateReason? + /// The identifier of the WorkSpace. + public let workspaceId: String? + + public init(associatedResourceId: String? = nil, associatedResourceType: WorkSpaceAssociatedResourceType? = nil, created: Date? = nil, lastUpdatedTime: Date? = nil, state: AssociationState? = nil, stateReason: AssociationStateReason? = nil, workspaceId: String? = nil) { + self.associatedResourceId = associatedResourceId + self.associatedResourceType = associatedResourceType + self.created = created + self.lastUpdatedTime = lastUpdatedTime + self.state = state + self.stateReason = stateReason + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case associatedResourceId = "AssociatedResourceId" + case associatedResourceType = "AssociatedResourceType" + case created = "Created" + case lastUpdatedTime = "LastUpdatedTime" + case state = "State" + case stateReason = "StateReason" + case workspaceId = "WorkspaceId" + } + } + public struct WorkspacesIpGroup: AWSDecodableShape { /// The description of the group. public let groupDesc: String? 
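// --- Usage sketch (not part of the generated diff above) ---
// A hedged example of the new application-deployment shapes and operations
// (DeployWorkspaceApplications, DescribeWorkspaceAssociations) whose request,
// result, and enum types are defined in the diff. It assumes an application has
// already been associated via AssociateWorkspaceApplication; the AWSClient,
// region, and WorkSpace identifier are placeholders.
import SotoWorkSpaces

@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *)
func deployAndInspectApplications(workspaceId: String, using client: AWSClient) async throws {
    let workSpaces = WorkSpaces(client: client, region: .useast1)

    // Deploy every application currently associated with the WorkSpace.
    // Per the operation documentation, `force: true` retries deployments
    // that previously failed.
    let deployResult = try await workSpaces.deployWorkspaceApplications(
        .init(force: true, workspaceId: workspaceId)
    )

    // The result carries the association records touched by the deployment;
    // failures surface through AssociationStateReason.
    for association in deployResult.deployment?.associations ?? [] {
        if association.state == .error, let reason = association.stateReason {
            print("Deployment failed: \(reason.errorCode?.rawValue ?? "unknown") \(reason.errorMessage ?? "")")
        }
    }

    // Re-query the WorkSpace's application associations to confirm their state.
    let result = try await workSpaces.describeWorkspaceAssociations(
        .init(associatedResourceTypes: [.application], workspaceId: workspaceId)
    )
    for association in result.associations ?? [] {
        print("\(association.associatedResourceId ?? "-"): \(association.state?.rawValue ?? "UNKNOWN")")
    }
}
// --- End usage sketch ---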
@@ -4108,13 +4731,18 @@ extension WorkSpaces { public struct WorkSpacesErrorType: AWSErrorType { enum Code: String { case accessDeniedException = "AccessDeniedException" + case applicationNotSupportedException = "ApplicationNotSupportedException" + case computeNotCompatibleException = "ComputeNotCompatibleException" + case incompatibleApplicationsException = "IncompatibleApplicationsException" case invalidParameterValuesException = "InvalidParameterValuesException" case invalidResourceStateException = "InvalidResourceStateException" + case operatingSystemNotCompatibleException = "OperatingSystemNotCompatibleException" case operationInProgressException = "OperationInProgressException" case operationNotSupportedException = "OperationNotSupportedException" case resourceAlreadyExistsException = "ResourceAlreadyExistsException" case resourceAssociatedException = "ResourceAssociatedException" case resourceCreationFailedException = "ResourceCreationFailedException" + case resourceInUseException = "ResourceInUseException" case resourceLimitExceededException = "ResourceLimitExceededException" case resourceNotFoundException = "ResourceNotFoundException" case resourceUnavailableException = "ResourceUnavailableException" @@ -4143,10 +4771,18 @@ public struct WorkSpacesErrorType: AWSErrorType { /// The user is not authorized to access a resource. public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The specified application is not supported. + public static var applicationNotSupportedException: Self { .init(.applicationNotSupportedException) } + /// The compute type of the WorkSpace is not compatible with the application. + public static var computeNotCompatibleException: Self { .init(.computeNotCompatibleException) } + /// The specified application is not compatible with the resource. + public static var incompatibleApplicationsException: Self { .init(.incompatibleApplicationsException) } /// One or more parameter values are not valid. public static var invalidParameterValuesException: Self { .init(.invalidParameterValuesException) } /// The state of the resource is not valid for this operation. public static var invalidResourceStateException: Self { .init(.invalidResourceStateException) } + /// The operating system of the WorkSpace is not compatible with the application. + public static var operatingSystemNotCompatibleException: Self { .init(.operatingSystemNotCompatibleException) } /// The properties of this WorkSpace are currently being modified. Try again in a moment. public static var operationInProgressException: Self { .init(.operationInProgressException) } /// This operation is not supported. @@ -4157,6 +4793,8 @@ public struct WorkSpacesErrorType: AWSErrorType { public static var resourceAssociatedException: Self { .init(.resourceAssociatedException) } /// The resource could not be created. public static var resourceCreationFailedException: Self { .init(.resourceCreationFailedException) } + /// The specified resource is currently in use. + public static var resourceInUseException: Self { .init(.resourceInUseException) } /// Your resource limits have been exceeded. public static var resourceLimitExceededException: Self { .init(.resourceLimitExceededException) } /// The resource could not be found. diff --git a/models/appconfig.json b/models/appconfig.json index 7af8cbec1b..2f2a892b16 100644 --- a/models/appconfig.json +++ b/models/appconfig.json @@ -1570,6 +1570,18 @@ "traits": { "smithy.api#documentation": "

The type of configurations contained in the profile. AppConfig supports feature flags and freeform configurations. We recommend you create feature flag configurations to enable or disable new features and freeform configurations to distribute configurations to an application. When calling this API, enter one of the following values for Type:
  • AWS.AppConfig.FeatureFlags
  • AWS.Freeform

" } + }, + "KmsKeyArn": { + "target": "com.amazonaws.appconfig#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of the Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.appconfig#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.

" + } } } }, @@ -1833,6 +1845,12 @@ "traits": { "smithy.api#documentation": "

The type of configurations contained in the profile. AppConfig supports feature flags and freeform configurations. We recommend you create feature flag configurations to enable or disable new features and freeform configurations to distribute configurations to an application. When calling this API, enter one of the following values for Type:
  • AWS.AppConfig.FeatureFlags
  • AWS.Freeform

" } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.appconfig#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier for a Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. The identifier can be a KMS key ID, alias, or the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service.

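(Aside for Soto users: the new kmsKeyIdentifier member lands directly on the generated CreateConfigurationProfileRequest shape. Below is a minimal, hedged sketch of passing it from Swift; it assumes an async/await-capable Soto release and an already-configured AppConfig service object, and the application ID and key alias are placeholders.)

import SotoAppConfig

// Hedged sketch: create a hosted feature-flag profile whose configuration
// versions are encrypted with a customer managed KMS key.
func createEncryptedProfile(_ appConfig: AppConfig) async throws {
    let request = AppConfig.CreateConfigurationProfileRequest(
        applicationId: "abc1234",                   // placeholder application ID
        kmsKeyIdentifier: "alias/my-appconfig-key", // key ID, alias, or key ARN
        locationUri: "hosted",                      // AppConfig hosted configuration store
        name: "feature-flags",
        type: "AWS.AppConfig.FeatureFlags"
    )
    let profile = try await appConfig.createConfigurationProfile(request)
    print(profile.kmsKeyArn ?? "no KMS key ARN returned")
}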
" + } } }, "traits": { @@ -2145,7 +2163,7 @@ "type": "structure", "members": { "Name": { - "target": "com.amazonaws.appconfig#Name", + "target": "com.amazonaws.appconfig#ExtensionOrParameterName", "traits": { "smithy.api#documentation": "

A name for the extension. Each extension name in your account must be unique. Extension\n versions use the same name.

", "smithy.api#required": {} @@ -2840,9 +2858,9 @@ } }, "KmsKeyIdentifier": { - "target": "com.amazonaws.appconfig#Identifier", + "target": "com.amazonaws.appconfig#KmsKeyIdentifier", "traits": { - "smithy.api#documentation": "

The KMS key identifier (key ID, key alias, or key ARN). AppConfig uses this ID to encrypt the configuration data using a customer managed key.

" + "smithy.api#documentation": "

The Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.

" } }, "VersionLabel": { @@ -3413,6 +3431,12 @@ } } }, + "com.amazonaws.appconfig#ExtensionOrParameterName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[^\\/#:\\n]{1,64}$" + } + }, "com.amazonaws.appconfig#ExtensionSummaries": { "type": "list", "member": { @@ -4145,6 +4169,13 @@ "smithy.api#documentation": "

A user-defined label for an AppConfig hosted configuration version.

", "smithy.api#httpHeader": "VersionLabel" } + }, + "KmsKeyArn": { + "target": "com.amazonaws.appconfig#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of the Key Management Service key that was used to encrypt this\n specific version of the configuration data in the AppConfig hosted configuration\n store.

", + "smithy.api#httpHeader": "KmsKeyArn" + } } } }, @@ -4187,6 +4218,12 @@ "traits": { "smithy.api#documentation": "

A user-defined label for an AppConfig hosted configuration version.

" } + }, + "KmsKeyArn": { + "target": "com.amazonaws.appconfig#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name of the Key Management Service key that was used to encrypt this\n specific version of the configuration data in the AppConfig hosted configuration\n store.

" + } } }, "traits": { @@ -4300,6 +4337,18 @@ "smithy.api#timestampFormat": "date-time" } }, + "com.amazonaws.appconfig#KmsKeyIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}|alias/[a-zA-Z0-9/_-]{1,250}|arn:aws[a-zA-Z-]*:kms:[a-z]{2}(-gov|-iso(b?))?-[a-z]+-\\d{1}:\\d{12}:(key/[0-9a-f-]{36}|alias/[a-zA-Z0-9/_-]{1,250})$" + } + }, + "com.amazonaws.appconfig#KmsKeyIdentifierOrEmpty": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\s{0,1}|[\\da-f]{8}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{4}-[\\da-f]{12}|alias/[a-zA-Z0-9/_-]{1,250}|arn:aws[a-zA-Z-]*:kms:[a-z]{2}(-gov|-iso(b?))?-[a-z]+-\\d{1}:\\d{12}:(key/[0-9a-f-]{36}|alias/[a-zA-Z0-9/_-]{1,250})$" + } + }, "com.amazonaws.appconfig#ListApplications": { "type": "operation", "input": { @@ -5093,7 +5142,7 @@ "com.amazonaws.appconfig#ParameterMap": { "type": "map", "key": { - "target": "com.amazonaws.appconfig#Name" + "target": "com.amazonaws.appconfig#ExtensionOrParameterName" }, "value": { "target": "com.amazonaws.appconfig#Parameter" @@ -5108,7 +5157,7 @@ "com.amazonaws.appconfig#ParameterValueMap": { "type": "map", "key": { - "target": "com.amazonaws.appconfig#Name" + "target": "com.amazonaws.appconfig#ExtensionOrParameterName" }, "value": { "target": "com.amazonaws.appconfig#StringWithLengthBetween1And2048" @@ -5229,7 +5278,7 @@ } }, "traits": { - "smithy.api#documentation": "

The number of one or more AppConfig resources exceeds the maximum allowed. Verify that your environment doesn't exceed the following service quotas:
  • Applications: 100 max
  • Deployment strategies: 20 max
  • Configuration profiles: 100 max per application
  • Environments: 20 max per application

To resolve this issue, you can delete one or more resources and try again. Or, you can request a quota increase. For more information about quotas and to request an increase, see Service quotas for AppConfig in the Amazon Web Services General Reference.

", + "smithy.api#documentation": "

The number of one or more AppConfig resources exceeds the maximum allowed. Verify that your environment doesn't exceed the following service quotas:
  • Applications: 100 max
  • Deployment strategies: 20 max
  • Configuration profiles: 100 max per application
  • Environments: 20 max per application

To resolve this issue, you can delete one or more resources and try again. Or, you can request a quota increase. For more information about quotas and to request an increase, see Service quotas for AppConfig in the Amazon Web Services General Reference.

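(From the Swift side, this quota error surfaces through the generated AppConfigErrorType, mirroring the WorkSpacesErrorType additions earlier in this diff. A hedged sketch, assuming async/await APIs and an already-configured AppConfig service object:)

import SotoAppConfig

// Hedged sketch: treat the AppConfig quota error as a distinct, recoverable case.
func createApplicationOrExplainQuota(_ appConfig: AppConfig, name: String) async throws {
    do {
        _ = try await appConfig.createApplication(.init(name: name))
    } catch let error as AppConfigErrorType where error == .resourceLimitExceededException {
        // Delete unused AppConfig resources or request a service quota increase.
        print("AppConfig quota exceeded: \(error)")
    }
}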
", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -5318,7 +5367,7 @@ } }, "KmsKeyIdentifier": { - "target": "com.amazonaws.appconfig#Identifier", + "target": "com.amazonaws.appconfig#KmsKeyIdentifier", "traits": { "smithy.api#documentation": "

The KMS key identifier (key ID, key alias, or key ARN). AppConfig uses this ID to encrypt the configuration data using a customer managed key.

" } @@ -5809,6 +5858,12 @@ "traits": { "smithy.api#documentation": "

A list of methods for validating the configuration.

" } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.appconfig#KmsKeyIdentifierOrEmpty", + "traits": { + "smithy.api#documentation": "

The identifier for a Key Management Service key to encrypt new configuration data versions in the AppConfig hosted configuration store. This attribute is only used for hosted configuration types. The identifier can be a KMS key ID, alias, or the Amazon Resource Name (ARN) of the key ID or alias. To encrypt data managed in other configuration stores, see the documentation for how to specify a KMS key for that particular service.

" + } } }, "traits": { diff --git a/models/auditmanager.json b/models/auditmanager.json index e6c1e11c66..8a990bb257 100644 --- a/models/auditmanager.json +++ b/models/auditmanager.json @@ -61,6 +61,10 @@ "target": "com.amazonaws.auditmanager#AWSAccount" }, "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, "smithy.api#sensitive": {} } }, diff --git a/models/auto-scaling.json b/models/auto-scaling.json index 8d867b1718..9eb02fb8ac 100644 --- a/models/auto-scaling.json +++ b/models/auto-scaling.json @@ -438,6 +438,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.autoscaling#AnyPrintableAsciiStringMaxLen4000": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4000 + }, + "smithy.api#pattern": "^[\\u0009\\u000A\\u000D\\u0020-\\u007e]+$" + } + }, "com.amazonaws.autoscaling#AsciiStringMaxLen255": { "type": "string", "traits": { @@ -3975,7 +3985,12 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about the instance refreshes for the specified Auto Scaling group.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.

" + "smithy.api#documentation": "

Gets information about the instance refreshes for the specified Auto Scaling group.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.

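(The smithy.api#paginated trait added just below normally causes Soto to emit a paginator helper alongside the plain operation. Assuming that helper is generated here with Soto's usual naming, iterating every page could look roughly like the sketch below; verify the exact method and shape names against the generated AutoScaling sources.)

import SotoAutoScaling

// Hedged sketch: walk all instance refreshes for one Auto Scaling group.
func printInstanceRefreshes(_ autoScaling: AutoScaling, groupName: String) async throws {
    let input = AutoScaling.DescribeInstanceRefreshesType(autoScalingGroupName: groupName)
    for try await page in autoScaling.describeInstanceRefreshesPaginator(input) {
        for refresh in page.instanceRefreshes ?? [] {
            print("\(refresh.instanceRefreshId ?? "?"): \(refresh.status?.rawValue ?? "unknown")")
        }
    }
}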
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxRecords" + } } }, "com.amazonaws.autoscaling#DescribeInstanceRefreshesAnswer": { @@ -4225,7 +4240,12 @@ ] } } - ] + ], + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxRecords" + } } }, "com.amazonaws.autoscaling#DescribeLoadBalancerTargetGroupsRequest": { @@ -4309,7 +4329,12 @@ ] } } - ] + ], + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxRecords" + } } }, "com.amazonaws.autoscaling#DescribeLoadBalancersRequest": { @@ -6991,7 +7016,7 @@ } }, "NotificationMetadata": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen1023", + "target": "com.amazonaws.autoscaling#AnyPrintableAsciiStringMaxLen4000", "traits": { "smithy.api#documentation": "

Additional information that is included any time Amazon EC2 Auto Scaling sends a message to the\n notification target.

" } @@ -7049,7 +7074,7 @@ } }, "NotificationMetadata": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen1023", + "target": "com.amazonaws.autoscaling#AnyPrintableAsciiStringMaxLen4000", "traits": { "smithy.api#documentation": "

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to\n the notification target.

" } @@ -8440,7 +8465,7 @@ } }, "NotificationMetadata": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen1023", + "target": "com.amazonaws.autoscaling#AnyPrintableAsciiStringMaxLen4000", "traits": { "smithy.api#documentation": "

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to\n the notification target.

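(The switch from XmlStringMaxLen1023 to AnyPrintableAsciiStringMaxLen4000 means notificationMetadata now accepts up to 4,000 printable ASCII characters, enough for a small JSON payload. A hedged sketch follows; the PutLifecycleHookType shape name follows the Auto Scaling query model and should be checked against the generated sources.)

import SotoAutoScaling

// Hedged sketch: attach a launch lifecycle hook that carries JSON metadata.
func attachLaunchHook(_ autoScaling: AutoScaling, groupName: String) async throws {
    let metadata = #"{"deployment":"blue-green","owner":"platform-team"}"#  // up to 4000 characters
    let request = AutoScaling.PutLifecycleHookType(
        autoScalingGroupName: groupName,
        lifecycleHookName: "wait-for-bootstrap",
        lifecycleTransition: "autoscaling:EC2_INSTANCE_LAUNCHING",
        notificationMetadata: metadata
    )
    _ = try await autoScaling.putLifecycleHook(request)
}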
" } diff --git a/models/config-service.json b/models/config-service.json index d98e34a8d6..14af71ad5c 100644 --- a/models/config-service.json +++ b/models/config-service.json @@ -675,7 +675,7 @@ "configurationItemStatus": { "target": "com.amazonaws.configservice#ConfigurationItemStatus", "traits": { - "smithy.api#documentation": "

The configuration item status. The valid values are:
  • OK – The resource configuration has been updated
  • ResourceDiscovered – The resource was newly discovered
  • ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type
  • ResourceDeleted – The resource was deleted
  • ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type

The CIs do not incur any cost.
" + "smithy.api#documentation": "

The configuration item status. The valid values are:
  • OK – The resource configuration has been updated
  • ResourceDiscovered – The resource was newly discovered
  • ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type
  • ResourceDeleted – The resource was deleted
  • ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type
" } }, "configurationStateId": { @@ -1631,7 +1631,7 @@ "configurationItemStatus": { "target": "com.amazonaws.configservice#ConfigurationItemStatus", "traits": { - "smithy.api#documentation": "

The configuration item status. The valid values are:
  • OK – The resource configuration has been updated
  • ResourceDiscovered – The resource was newly discovered
  • ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type
  • ResourceDeleted – The resource was deleted
  • ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type

The CIs do not incur any cost.
" + "smithy.api#documentation": "

The configuration item status. The valid values are:
  • OK – The resource configuration has been updated
  • ResourceDiscovered – The resource was newly discovered
  • ResourceNotRecorded – The resource was discovered but its configuration was not recorded since the recorder excludes the recording of resources of this type
  • ResourceDeleted – The resource was deleted
  • ResourceDeletedNotRecorded – The resource was deleted but its configuration was not recorded since the recorder excludes the recording of resources of this type
" } }, "configurationStateId": { @@ -5271,7 +5271,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration object for Config rule evaluation mode. The Supported valid values are Detective or Proactive.

" + "smithy.api#documentation": "

The configuration object for Config rule evaluation mode. The supported valid values are Detective or Proactive.

" } }, "com.amazonaws.configservice#EvaluationModes": { @@ -5465,7 +5465,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies whether the configuration recorder excludes resource types from being recorded.\n\t\t\tUse the resourceTypes field to enter a comma-separated list of resource types to exclude as exemptions.

" + "smithy.api#documentation": "

Specifies whether the configuration recorder excludes certain resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types you want to exclude from recording.

By default, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

How to use

To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

Config will then record configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded.

Globally recorded resources

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled. IAM users, groups, roles, and customer managed policies will be recorded automatically in all enabled Config Regions where Config was available before February 2022. This list does not include the following Regions:
  • Asia Pacific (Hyderabad)
  • Asia Pacific (Melbourne)
  • Europe (Spain)
  • Europe (Zurich)
  • Israel (Tel Aviv)
  • Middle East (UAE)
" } }, "com.amazonaws.configservice#ExecutionControls": { @@ -6902,7 +6902,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of ConfigurationItems for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.
", + "smithy.api#documentation": "\n

For accurate reporting on the compliance status, you must record the AWS::Config::ResourceCompliance resource type. For more information, see Selecting Which Resources Config Records.

Returns a list of ConfigurationItems for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -6931,13 +6931,13 @@ "laterTime": { "target": "com.amazonaws.configservice#LaterTime", "traits": { - "smithy.api#documentation": "

The time stamp that indicates a later time. If not specified,\n\t\t\tcurrent time is taken.

" + "smithy.api#documentation": "

The chronologically latest time in the time range for which the history is requested. If not specified, current time is taken.

" } }, "earlierTime": { "target": "com.amazonaws.configservice#EarlierTime", "traits": { - "smithy.api#documentation": "

The time stamp that indicates an earlier time. If not\n\t\t\tspecified, the action returns paginated results that contain\n\t\t\tconfiguration items that start when the first configuration item was\n\t\t\trecorded.

" + "smithy.api#documentation": "

The chronologically earliest time in the time range for which the history is requested. If not specified, the action returns paginated results that contain configuration items that start when the first configuration item was recorded.

" } }, "chronologicalOrder": { @@ -9153,7 +9153,7 @@ } }, "traits": { - "smithy.api#documentation": "

organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, \n\t\t\tand organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

" + "smithy.api#documentation": "

An object that specifies organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, \n\t\t\tand organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

" } }, "com.amazonaws.configservice#OrganizationManagedRuleMetadata": { @@ -9210,7 +9210,7 @@ } }, "traits": { - "smithy.api#documentation": "

organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

" + "smithy.api#documentation": "

An object that specifies organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. \n\t\t\tIt also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

" } }, "com.amazonaws.configservice#OrganizationResourceDetailedStatus": { @@ -9802,7 +9802,7 @@ "TemplateS3Uri": { "target": "com.amazonaws.configservice#TemplateS3Uri", "traits": { - "smithy.api#documentation": "

The location of the file containing the template body (s3://bucketname/prefix). The uri must point to a conformance pack template (max size: 300 KB) that is located in an Amazon S3 bucket in the same Region as the conformance pack.

\n \n

You must have access to read Amazon S3 bucket.

\n
" + "smithy.api#documentation": "

The location of the file containing the template body (s3://bucketname/prefix). The uri must point to a conformance pack template (max size: 300 KB) that is located in an Amazon S3 bucket in the same Region as the conformance pack.

\n \n

You must have read access to the Amazon S3 bucket. In addition, to ensure a successful deployment, the template object must not be in an archived storage class if this parameter is passed.

\n
" } }, "TemplateBody": { @@ -9889,7 +9889,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a delivery channel object to deliver configuration\n\t\t\tinformation to an Amazon S3 bucket and Amazon SNS topic.

\n

Before you can create a delivery channel, you must create a\n\t\t\tconfiguration recorder.

\n

You can use this action to change the Amazon S3 bucket or an\n\t\t\tAmazon SNS topic of the existing delivery channel. To change the\n\t\t\tAmazon S3 bucket or an Amazon SNS topic, call this action and\n\t\t\tspecify the changed values for the S3 bucket and the SNS topic. If\n\t\t\tyou specify a different value for either the S3 bucket or the SNS\n\t\t\ttopic, this action will keep the existing value for the parameter\n\t\t\tthat is not changed.

\n \n

You can have only one delivery channel per region in your\n\t\t\t\taccount.

\n
" + "smithy.api#documentation": "

Creates a delivery channel object to deliver configuration\n\t\t\tinformation and other compliance information to an Amazon S3 bucket and Amazon SNS topic.\n\t\t\tFor more information,\n\t\t\tsee Notifications that Config Sends to an Amazon SNS topic.

\n

Before you can create a delivery channel, you must create a\n\t\t\tconfiguration recorder.

\n

You can use this action to change the Amazon S3 bucket or an\n\t\t\tAmazon SNS topic of the existing delivery channel. To change the\n\t\t\tAmazon S3 bucket or an Amazon SNS topic, call this action and\n\t\t\tspecify the changed values for the S3 bucket and the SNS topic. If\n\t\t\tyou specify a different value for either the S3 bucket or the SNS\n\t\t\ttopic, this action will keep the existing value for the parameter\n\t\t\tthat is not changed.

\n \n

You can have only one delivery channel per region in your\n\t\t\t\taccount.

\n
" } }, "com.amazonaws.configservice#PutDeliveryChannelRequest": { @@ -10166,7 +10166,7 @@ "TemplateS3Uri": { "target": "com.amazonaws.configservice#TemplateS3Uri", "traits": { - "smithy.api#documentation": "

Location of file containing the template body. The uri must point to the conformance pack template\n\t\t\t(max size: 300 KB).

\n \n

You must have access to read Amazon S3 bucket.

\n
" + "smithy.api#documentation": "

Location of file containing the template body. The uri must point to the conformance pack template\n\t\t\t(max size: 300 KB).

\n \n

You must have read access to the Amazon S3 bucket. In addition, to ensure a successful deployment, the template object must not be in an archived storage class if this parameter is passed.

\n
" } }, "TemplateBody": { @@ -10620,14 +10620,14 @@ "target": "com.amazonaws.configservice#AllSupported", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether Config records configuration changes for all supported regional resource types.

\n

If you set this field to true, when Config\n\t\t\tadds support for a new type of regional resource, Config starts recording resources of that type automatically.

\n

If you set this field to true,\n\t\t\tyou cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes.

" + "smithy.api#documentation": "

Specifies whether Config records configuration changes for all supported regionally recorded resource types.

\n

If you set this field to true, when Config\n\t\t\tadds support for a new regionally recorded resource type, Config starts recording resources of that type automatically.

\n

If you set this field to true,\n\t\t\tyou cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes.

\n \n

\n Region Availability\n

\n

Check Resource Coverage by Region Availability\n\t\t\t\tto see if a resource type is supported in the Amazon Web Services Region where you set up Config.

\n
" } }, "includeGlobalResourceTypes": { "target": "com.amazonaws.configservice#IncludeGlobalResourceTypes", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether Config records configuration changes for all supported global resources.

\n

Before you set this field to true,\n\t\t\tset the allSupported field of RecordingGroup to\n\t\t\ttrue. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

\n

If you set this field to true, when Config\n\t\t\tadds support for a new type of global resource in the Region where you set up the configuration recorder, Config starts recording\n\t\t\tresources of that type automatically.

\n \n

If you set this field to false but list global resource types in the resourceTypes field of RecordingGroup,\n\t\t\tConfig will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

\n

If you do not want to record configuration changes to global resource types, make sure to not list them in the resourceTypes field\n\t\t\tin addition to setting the includeGlobalResourceTypes field to false.

\n
" + "smithy.api#documentation": "

A legacy field which only applies to the globally recorded IAM resource types: IAM users, groups, roles, and customer managed policies.\n\t\t\tIf you select this option, these resource types will be recorded in all enabled Config regions where Config was available before February 2022.\n\t\t\tThis list does not include the following Regions:

\n
    \n
  • \n

    Asia Pacific (Hyderabad)

    \n
  • \n
  • \n

    Asia Pacific (Melbourne)

    \n
  • \n
  • \n

    Europe (Spain)

    \n
  • \n
  • \n

    Europe (Zurich)

    \n
  • \n
  • \n

    Israel (Tel Aviv)

    \n
  • \n
  • \n

    Middle East (UAE)

    \n
  • \n
\n \n

\n Aurora global clusters are automatically globally recorded\n

\n

The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is not set to true.\n\t\t\t\tincludeGlobalResourceTypes is a legacy field which only applies to IAM users, groups, roles, and customer managed policies.\n\t\t\t

\n

If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies:

\n
    \n
  1. \n

    \n Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

    \n
  2. \n
  3. \n

    \n Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

    \n
  4. \n
\n

For more information, see Selecting Which Resources are Recorded in the Config developer guide.

\n
\n \n

\n Required and optional fields\n

\n

Before you set this field to true,\n\t\t\tset the allSupported field of RecordingGroup to\n\t\t\ttrue. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

\n
\n \n

\n Overriding fields\n

\n

If you set this field to false but list globally recorded IAM resource types in the resourceTypes field of RecordingGroup,\n\t\t\tConfig will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

\n

If you do not want to record configuration changes to the globally recorded IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field\n\t\t\tin addition to setting the includeGlobalResourceTypes field to false.

\n
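(To make the recording strategies above concrete from Soto: the sketch below enables the exclusion strategy via PutConfigurationRecorder. Shape and case names, for example RecordingStrategyType.exclusionByResourceTypes, follow Soto's usual codegen conventions and are assumptions to verify against the generated ConfigService shapes.)

import SotoConfigService

// Hedged sketch: record all supported resource types except a short exclusion list.
func enableExclusionRecording(_ config: ConfigService, roleARN: String) async throws {
    let excluded = ["AWS::IoT::JobTemplate", "AWS::Transfer::Certificate"]
        .compactMap { ConfigService.ResourceType(rawValue: $0) }
    let recordingGroup = ConfigService.RecordingGroup(
        allSupported: false,
        exclusionByResourceTypes: .init(resourceTypes: excluded),
        recordingStrategy: .init(useOnly: .exclusionByResourceTypes)
    )
    let recorder = ConfigService.ConfigurationRecorder(
        name: "default",
        recordingGroup: recordingGroup,
        roleARN: roleARN
    )
    try await config.putConfigurationRecorder(.init(configurationRecorder: recorder))
}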
" } }, "resourceTypes": { @@ -10645,12 +10645,12 @@ "recordingStrategy": { "target": "com.amazonaws.configservice#RecordingStrategy", "traits": { - "smithy.api#documentation": "

An object that specifies the recording strategy for the configuration recorder.

\n
    \n
  • \n

    If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type.

    \n
  • \n
  • \n

    If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup.

    \n
  • \n
  • \n

    If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types\n\t\t\t\texcept the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    \n
  • \n
\n \n

The recordingStrategy field is optional when you set the\n\t\t\tallSupported field of RecordingGroup to true.

\n

The recordingStrategy field is optional when you list resource types in the\n\t\t\t\tresourceTypes field of RecordingGroup.

\n

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

\n
\n \n

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

\n

For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes.

\n

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy,\n\t\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\t\tConfig starts recording resources of that type automatically.

\n
" + "smithy.api#documentation": "

An object that specifies the recording strategy for the configuration recorder.

\n
    \n
  • \n

    If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regionally recorded resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new regionally recorded resource type, Config automatically starts recording resources of that type.

    \n
  • \n
  • \n

    If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup.

    \n
  • \n
  • \n

    If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types\n\t\t\t\texcept the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    \n
  • \n
\n \n

\n Required and optional fields\n

\n

The recordingStrategy field is optional when you set the\n\t\t\tallSupported field of RecordingGroup to true.

\n

The recordingStrategy field is optional when you list resource types in the\n\t\t\t\tresourceTypes field of RecordingGroup.

\n

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

\n
\n \n

\n Overriding fields\n

\n

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

\n

For example, even if you set includeGlobalResourceTypes to false, globally recorded IAM resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

\n
\n \n

\n Global resources types and the resource exclusion recording strategy\n

\n

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy,\n\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\tConfig starts recording resources of that type automatically.

\n

In addition, unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled. IAM users, groups, roles, and customer managed policies will be recorded automatically in all enabled Config Regions where Config was available before February 2022. This list does not include the following Regions:

\n
    \n
  • \n

    Asia Pacific (Hyderabad)

    \n
  • \n
  • \n

    Asia Pacific (Melbourne)

    \n
  • \n
  • \n

    Europe (Spain)

    \n
  • \n
  • \n

    Europe (Zurich)

    \n
  • \n
  • \n

    Israel (Tel Aviv)

    \n
  • \n
  • \n

    Middle East (UAE)

    \n
  • \n
\n
" } } }, "traits": { - "smithy.api#documentation": "

Specifies which resource types Config\n\t\t\trecords for configuration changes.\n\t\t\tIn the recording group, you specify whether you want to record all supported resource types or to include or exclude specific types of resources.

\n

By default, Config records configuration changes for all supported types of\n\t\t\t\tRegional resources that Config discovers in the\n\t\t\t\tAmazon Web Services Region in which it is running. Regional resources are tied to a\n\t\t\tRegion and can be used only in that Region. Examples of Regional resources are Amazon EC2 instances and Amazon EBS volumes.

\n

You can also have Config record supported types of global resources.\n\t\t\t\tGlobal resources are not tied to a specific Region and can be used in all Regions. The global\n\t\t\t\tresource types that Config supports include IAM users, groups, roles, and customer managed\n\t\t\t\tpolicies.

\n \n

Global resource types onboarded to Config recording after February 2022 will\n\t\t\t\tbe recorded only in the service's home Region for the commercial partition and\n\t\t\t\tAmazon Web Services GovCloud (US-West) for the Amazon Web Services GovCloud (US) partition. You can view the\n\t\t\t\tConfiguration Items for these new global resource types only in their home Region\n\t\t\t\tand Amazon Web Services GovCloud (US-West).

\n
\n

If you don't want Config to record all resources, you can specify which types of resources Config records with the resourceTypes parameter.

\n

For a list of supported resource types, see Supported Resource Types in the Config developer guide.

\n

For more information and a table of the Home Regions for Global Resource Types Onboarded after February 2022, see Selecting Which Resources Config Records in the Config developer guide.

" + "smithy.api#documentation": "

Specifies which resource types Config\n\t\t\trecords for configuration changes. By default, Config records configuration changes for all current and future supported resource types in the Amazon Web Services Region where you have enabled Config\n\t\t\t(excluding the globally recorded IAM resource types: IAM users, groups, roles, and customer managed policies).

\n

In the recording group, you specify whether you want to record all supported current and future supported resource types or to include or exclude specific resources types.\n\t\t\tFor a list of supported resource types, see Supported Resource Types in the Config developer guide.

\n

If you don't want Config to record all current and future supported resource types, use one of the following recording strategies:

\n
    \n
  1. \n

    \n Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

    \n
  2. \n
  3. \n

    \n Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

    \n
  4. \n
\n \n

\n Aurora global clusters are automatically globally recorded\n

\n

The AWS::RDS::GlobalCluster resource type\n\t\t\t\twill be recorded in all supported Config Regions where the configuration recorder is enabled.

\n

If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use the EXCLUSION_BY_RESOURCE_TYPES or INCLUSION_BY_RESOURCE_TYPES recording strategy.

\n
" } }, "com.amazonaws.configservice#RecordingStrategy": { @@ -10659,7 +10659,7 @@ "useOnly": { "target": "com.amazonaws.configservice#RecordingStrategyType", "traits": { - "smithy.api#documentation": "

The recording strategy for the configuration recorder.

\n
    \n
  • \n

    If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true.

    \n

    When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. For a list of supported resource types,\n\t\t\t\tsee Supported Resource Types in the Config developer guide.

    \n
  • \n
  • \n

    If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records\n\t\t\t\t\tconfiguration changes for only the resource types that you specify in the\n\t\t\t\t\t\tresourceTypes field of RecordingGroup.

    \n
  • \n
  • \n

    If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records\n\t\t\t\t\tconfiguration changes for all supported resource types, except the resource\n\t\t\t\t\ttypes that you specify as exemptions to exclude from being recorded in the\n\t\t\t\t\t\tresourceTypes field of ExclusionByResourceTypes.

    \n
  • \n
\n \n

The recordingStrategy field is optional when you set the\n\t\t\tallSupported field of RecordingGroup to true.

\n

The recordingStrategy field is optional when you list resource types in the\n\t\t\t\tresourceTypes field of RecordingGroup.

\n

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

\n
\n \n

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

\n

For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes.

\n

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy,\n\t\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\t\tConfig starts recording resources of that type automatically.

\n
" + "smithy.api#documentation": "

The recording strategy for the configuration recorder.

\n
    \n
  • \n

    If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regionally recorded resource types.\n\t\t\t\tYou also must set the allSupported field of RecordingGroup to true.\n\t\t\t\tWhen Config adds support for a new regionally recorded resource type, Config automatically starts recording resources of that type. For a list of supported resource types,\n\t\t\t\tsee Supported Resource Types in the Config developer guide.

    \n
  • \n
  • \n

    If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records\n\t\t\t\t\tconfiguration changes for only the resource types that you specify in the\n\t\t\t\t\t\tresourceTypes field of RecordingGroup.

    \n
  • \n
  • \n

    If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records\n\t\t\t\t\tconfiguration changes for all supported resource types, except the resource\n\t\t\t\t\ttypes that you specify to exclude from being recorded in the\n\t\t\t\t\t\tresourceTypes field of ExclusionByResourceTypes.

    \n
  • \n
\n \n

\n Required and optional fields\n

\n

The recordingStrategy field is optional when you set the\n\t\t\tallSupported field of RecordingGroup to true.

\n

The recordingStrategy field is optional when you list resource types in the\n\t\t\t\tresourceTypes field of RecordingGroup.

\n

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

\n
\n \n

\n Overriding fields\n

\n

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

\n

For example, even if you set includeGlobalResourceTypes to false, globally recorded IAM resource types will still be automatically\n\t\t\trecorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

\n
\n \n

\n Global resource types and the exclusion recording strategy\n

\n

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy,\n\t\t\t\twhen Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types,\n\t\t\t\tConfig starts recording resources of that type automatically.

\n

In addition, unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled. IAM users, groups, roles, and customer managed policies will be recorded automatically in all enabled Config Regions where Config was available before February 2022. This list does not include the following Regions:

\n
    \n
  • \n

    Asia Pacific (Hyderabad)

    \n
  • \n
  • \n

    Asia Pacific (Melbourne)

    \n
  • \n
  • \n

    Europe (Spain)

    \n
  • \n
  • \n

    Europe (Zurich)

    \n
  • \n
  • \n

    Israel (Tel Aviv)

    \n
  • \n
  • \n

    Middle East (UAE)

    \n
  • \n
\n
" } } }, @@ -13669,6 +13669,120 @@ "traits": { "smithy.api#enumValue": "AWS::NetworkManager::LinkAssociation" } + }, + "IoTWirelessMulticastGroup": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::IoTWireless::MulticastGroup" + } + }, + "PersonalizeDatasetGroup": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::Personalize::DatasetGroup" + } + }, + "IoTTwinMakerComponentType": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::IoTTwinMaker::ComponentType" + } + }, + "CodeBuildReportGroup": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::CodeBuild::ReportGroup" + } + }, + "SageMakerFeatureGroup": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::SageMaker::FeatureGroup" + } + }, + "MSKBatchScramSecret": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::MSK::BatchScramSecret" + } + }, + "AppStreamStack": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::AppStream::Stack" + } + }, + "IoTJobTemplate": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::IoT::JobTemplate" + } + }, + "IoTWirelessFuotaTask": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::IoTWireless::FuotaTask" + } + }, + "IoTProvisioningTemplate": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::IoT::ProvisioningTemplate" + } + }, + "InspectorV2Filter": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::InspectorV2::Filter" + } + }, + "Route53ResolverResolverQueryLoggingConfigAssociation": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::Route53Resolver::ResolverQueryLoggingConfigAssociation" + } + }, + "ServiceDiscoveryInstance": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::ServiceDiscovery::Instance" + } + }, + "TransferCertificate": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::Transfer::Certificate" + } + }, + "MediaConnectFlowSource": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::MediaConnect::FlowSource" + } + }, + "APSRuleGroupsNamespace": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::APS::RuleGroupsNamespace" + } + }, + "CodeGuruProfilerProfilingGroup": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::CodeGuruProfiler::ProfilingGroup" + } + }, + "Route53ResolverResolverQueryLoggingConfig": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::Route53Resolver::ResolverQueryLoggingConfig" + } + }, + "BatchSchedulingPolicy": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS::Batch::SchedulingPolicy" + } } } }, diff --git a/models/controltower.json b/models/controltower.json index 18e76c57a7..abddf4bc5a 100644 --- a/models/controltower.json +++ b/models/controltower.json @@ -14,6 +14,9 @@ { "target": "com.amazonaws.controltower#GetControlOperation" }, + { + "target": "com.amazonaws.controltower#GetEnabledControl" + }, { "target": "com.amazonaws.controltower#ListEnabledControls" } @@ -49,7 +52,7 @@ "x-amzn-trace-id" ] }, - "smithy.api#documentation": "

These interfaces allow you to apply the AWS library of pre-defined controls to your\norganizational units, programmatically. In this context, controls are the same as AWS Control Tower guardrails.

\n

To call these APIs, you'll need to know:

\n
    \n
  • \n

    the ControlARN for the control--that is, the\n guardrail--you are targeting,

    \n
  • \n
  • \n

    and the ARN associated with the target organizational unit (OU).

    \n
  • \n
\n

\n To get the ControlARN for your AWS Control Tower guardrail:\n

\n

The ControlARN contains the control name which is specified in each guardrail. For a list of control names for Strongly recommended and Elective guardrails, see Resource identifiers for APIs and guardrails in the Automating tasks section of the AWS Control Tower User Guide. Remember that Mandatory guardrails cannot be added or removed.

\n \n

\n ARN format: \n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}\n

\n

\n Example:\n

\n

\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

\n
\n

\n To get the ARN for an OU:\n

\n

In the AWS Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n

\n
\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

AWS Control Tower supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the AWS Control Tower service received, who made the request and when, and so on. For more about AWS Control Tower and its support for CloudTrail, see Logging AWS Control Tower Actions with AWS CloudTrail in the AWS Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

", + "smithy.api#documentation": "

These interfaces allow you to apply the AWS library of pre-defined controls to your organizational units, programmatically. In AWS Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

\n

To call these APIs, you'll need to know:

\n
    \n
  • \n

    the controlIdentifier for the control--or guardrail--you are targeting.

    \n
  • \n
  • \n

    the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

    \n
  • \n
\n

\n To get the controlIdentifier for your AWS Control Tower\n control:\n

\n

The controlIdentifier is an ARN that is specified for each\n control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

\n

The controlIdentifier is unique in each AWS Region for each control. You can\n find the controlIdentifier for each Region and control in the Tables of control metadata in the AWS Control Tower User Guide.\n

\n

A quick-reference list of control identifiers for the AWS Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and guardrails in the Controls reference guide section of the AWS Control Tower User Guide. Remember that Mandatory controls cannot be added or removed.

\n \n

\n ARN format:\n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}\n

\n

\n Example:\n

\n

\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

\n
\n

\n To get the targetIdentifier:\n

\n

The targetIdentifier is the ARN for an OU.

\n

In the AWS Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n

\n
\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

AWS Control Tower supports AWS CloudTrail, a service that records AWS API calls for your\n AWS account and delivers log files to an Amazon S3 bucket. By using information collected by\n CloudTrail, you can determine which requests the AWS Control Tower service received, who made\n the request and when, and so on. For more about AWS Control Tower and its support for\n CloudTrail, see Logging AWS Control Tower\n Actions with AWS CloudTrail in the AWS Control Tower User Guide. To learn more about\n CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User\n Guide.

", "smithy.api#title": "AWS Control Tower", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -913,11 +916,21 @@ } }, "traits": { - "smithy.api#documentation": "

User does not have sufficient access to perform this action. \n

", + "smithy.api#documentation": "

User does not have sufficient access to perform this action.

", "smithy.api#error": "client", "smithy.api#httpError": 403 } }, + "com.amazonaws.controltower#Arn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[0-9a-zA-Z_\\-:\\/]+$" + } + }, "com.amazonaws.controltower#ConflictException": { "type": "structure", "members": { @@ -976,7 +989,7 @@ "statusMessage": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

If the operation result is FAILED, this string contains a message explaining why the operation failed.

" + "smithy.api#documentation": "

If the operation result is FAILED, this string contains a message explaining\n why the operation failed.

" } } }, @@ -1050,7 +1063,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified\n organizational unit and the accounts it contains. The resources will vary according to the\n control that you specify.

", + "smithy.api#documentation": "

This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the AWS Control Tower User Guide.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1064,14 +1077,14 @@ "controlIdentifier": { "target": "com.amazonaws.controltower#ControlIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and Elective controls are permitted,\n with the exception of the Region deny guardrail.

", + "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and\n Elective controls are permitted, with the exception of the\n Region deny control. For information on how to find the controlIdentifier, see the overview page.

", "smithy.api#required": {} } }, "targetIdentifier": { "target": "com.amazonaws.controltower#TargetIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the organizational unit.

", + "smithy.api#documentation": "

The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.

", "smithy.api#required": {} } } @@ -1089,6 +1102,43 @@ } } }, + "com.amazonaws.controltower#DriftStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "DRIFTED", + "value": "DRIFTED" + }, + { + "name": "IN_SYNC", + "value": "IN_SYNC" + }, + { + "name": "NOT_CHECKING", + "value": "NOT_CHECKING" + }, + { + "name": "UNKNOWN", + "value": "UNKNOWN" + } + ] + } + }, + "com.amazonaws.controltower#DriftStatusSummary": { + "type": "structure", + "members": { + "driftStatus": { + "target": "com.amazonaws.controltower#DriftStatus", + "traits": { + "smithy.api#documentation": "

The drift status of the enabled control.

Valid values:

  • DRIFTED: The enabledControl deployed in this configuration doesn’t match the configuration that AWS Control Tower expected.

  • IN_SYNC: The enabledControl deployed in this configuration matches the configuration that AWS Control Tower expected.

  • NOT_CHECKING: AWS Control Tower does not check drift for this enabled control. Drift is not supported for the control type.

  • UNKNOWN: AWS Control Tower is not able to check the drift status for the enabled control.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The drift summary of the enabled control.

\n

AWS Control Tower expects the enabled control\n configuration to include all supported and governed Regions. If the enabled control differs\n from the expected configuration, it is defined to be in a state of drift. You can repair this drift by resetting the enabled control.

" + } + }, "com.amazonaws.controltower#EnableControl": { "type": "operation", "input": { @@ -1121,7 +1171,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API call activates a control. It starts an asynchronous operation that creates AWS resources on the specified\n organizational unit and the accounts it contains. The resources created will vary according to\n the control that you specify.

", + "smithy.api#documentation": "

This API call activates a control. It starts an asynchronous operation that creates AWS\n resources on the specified organizational unit and the accounts it contains. The resources\n created will vary according to the control that you specify. For usage examples, see \n the AWS Control Tower User Guide\n \n

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1135,14 +1185,14 @@ "controlIdentifier": { "target": "com.amazonaws.controltower#ControlIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and Elective controls are permitted,\n with the exception of the Region deny guardrail.

", + "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and\n Elective controls are permitted, with the exception of the\n Region deny control. For information on how to find the controlIdentifier, see the overview page.

", "smithy.api#required": {} } }, "targetIdentifier": { "target": "com.amazonaws.controltower#TargetIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the organizational unit.

", + "smithy.api#documentation": "

The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.

", "smithy.api#required": {} } } @@ -1160,13 +1210,81 @@ } } }, + "com.amazonaws.controltower#EnabledControlDetails": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.controltower#Arn", + "traits": { + "smithy.api#documentation": "

\n The ARN of the enabled control.\n

" + } + }, + "controlIdentifier": { + "target": "com.amazonaws.controltower#ControlIdentifier", + "traits": { + "smithy.api#documentation": "

\n The control identifier of the enabled control. For information on how to find the controlIdentifier, see the overview page.\n

" + } + }, + "targetIdentifier": { + "target": "com.amazonaws.controltower#TargetIdentifier", + "traits": { + "smithy.api#documentation": "

\n The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.\n

" + } + }, + "targetRegions": { + "target": "com.amazonaws.controltower#TargetRegions", + "traits": { + "smithy.api#documentation": "

\n Target AWS Regions for the enabled control.\n

" + } + }, + "statusSummary": { + "target": "com.amazonaws.controltower#EnablementStatusSummary", + "traits": { + "smithy.api#documentation": "

\n The deployment summary of the enabled control.\n

" + } + }, + "driftStatusSummary": { + "target": "com.amazonaws.controltower#DriftStatusSummary", + "traits": { + "smithy.api#documentation": "

\n The drift status of the enabled control.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Information about the enabled control.\n

" + } + }, "com.amazonaws.controltower#EnabledControlSummary": { "type": "structure", "members": { "controlIdentifier": { "target": "com.amazonaws.controltower#ControlIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and Elective controls are permitted,\n with the exception of the Region deny guardrail.

" + "smithy.api#documentation": "

The ARN of the control. Only Strongly recommended and\n Elective controls are permitted, with the exception of the\n Region deny control. For information on how to find the controlIdentifier, see the overview page.

" + } + }, + "arn": { + "target": "com.amazonaws.controltower#Arn", + "traits": { + "smithy.api#documentation": "

\n The ARN of the enabled control.\n

" + } + }, + "targetIdentifier": { + "target": "com.amazonaws.controltower#TargetIdentifier", + "traits": { + "smithy.api#documentation": "

\n The ARN of the organizational unit.\n

" + } + }, + "statusSummary": { + "target": "com.amazonaws.controltower#EnablementStatusSummary", + "traits": { + "smithy.api#documentation": "" + } + }, + "driftStatusSummary": { + "target": "com.amazonaws.controltower#DriftStatusSummary", + "traits": { + "smithy.api#documentation": "

\n The drift status of the enabled control.\n

" } } }, @@ -1180,6 +1298,45 @@ "target": "com.amazonaws.controltower#EnabledControlSummary" } }, + "com.amazonaws.controltower#EnablementStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "SUCCEEDED", + "value": "SUCCEEDED" + }, + { + "name": "FAILED", + "value": "FAILED" + }, + { + "name": "UNDER_CHANGE", + "value": "UNDER_CHANGE" + } + ] + } + }, + "com.amazonaws.controltower#EnablementStatusSummary": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.controltower#EnablementStatus", + "traits": { + "smithy.api#documentation": "

The deployment status of the enabled control.

Valid values:

  • SUCCEEDED: The enabledControl configuration was deployed successfully.

  • UNDER_CHANGE: The enabledControl configuration is changing.

  • FAILED: The enabledControl configuration failed to deploy.

" + } + }, + "lastOperationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier", + "traits": { + "smithy.api#documentation": "

\n The last operation identifier for the enabled control.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The deployment summary of the enabled control. \n

" + } + }, "com.amazonaws.controltower#GetControlOperation": { "type": "operation", "input": { @@ -1206,7 +1363,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the status of a particular EnableControl or\n DisableControl operation. Displays a message in case of error.\n Details for an operation are available for 90 days.

", + "smithy.api#documentation": "

Returns the status of a particular EnableControl or\n DisableControl operation. Displays a message in case of error. Details for an\n operation are available for 90 days. For usage examples, see \n the AWS Control Tower User Guide\n \n

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1233,10 +1390,75 @@ "controlOperation": { "target": "com.amazonaws.controltower#ControlOperation", "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

An operation performed by the control.

", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.controltower#GetEnabledControl": { + "type": "operation", + "input": { + "target": "com.amazonaws.controltower#GetEnabledControlInput" + }, + "output": { + "target": "com.amazonaws.controltower#GetEnabledControlOutput" + }, + "errors": [ + { + "target": "com.amazonaws.controltower#AccessDeniedException" + }, + { + "target": "com.amazonaws.controltower#InternalServerException" + }, + { + "target": "com.amazonaws.controltower#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.controltower#ThrottlingException" + }, + { + "target": "com.amazonaws.controltower#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Provides details about the enabled control. For usage examples, see the AWS Control Tower User Guide.

Returned values:

  • TargetRegions: Shows target AWS Regions where the enabled control is available to be deployed.

  • StatusSummary: Provides a detailed summary of the deployment status.

  • DriftSummary: Provides a detailed summary of the drifted status.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/get-enabled-control" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controltower#GetEnabledControlInput": { + "type": "structure", + "members": { + "enabledControlIdentifier": { + "target": "com.amazonaws.controltower#Arn", + "traits": { + "smithy.api#documentation": "

\n The ARN of the enabled control.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controltower#GetEnabledControlOutput": { + "type": "structure", + "members": { + "enabledControlDetails": { + "target": "com.amazonaws.controltower#EnabledControlDetails", + "traits": { + "smithy.api#documentation": "

\n Information about the enabled control.\n

", "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.controltower#InternalServerException": { @@ -1282,7 +1504,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the controls enabled by AWS Control Tower on the specified organizational unit and\n the accounts it contains.

", + "smithy.api#documentation": "

Lists the controls enabled by AWS Control Tower on the specified organizational unit and\n the accounts it contains. For usage examples, see \n the AWS Control Tower User Guide\n \n

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1303,7 +1525,7 @@ "targetIdentifier": { "target": "com.amazonaws.controltower#TargetIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the organizational unit.

", + "smithy.api#documentation": "

The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.

", "smithy.api#required": {} } }, @@ -1344,7 +1566,7 @@ "traits": { "smithy.api#range": { "min": 1, - "max": 100 + "max": 200 } } }, @@ -1358,6 +1580,29 @@ "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" } }, + "com.amazonaws.controltower#Region": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.controltower#RegionName", + "traits": { + "smithy.api#documentation": "

\n The AWS Region name.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An AWS Region in which AWS Control Tower expects to find the control deployed.

\n

The expected Regions are based on the Regions that are governed by the landing zone. In\n certain cases, a control is not actually enabled in the Region as expected, such as during\n drift, or mixed governance.

" + } + }, + "com.amazonaws.controltower#RegionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, "com.amazonaws.controltower#ResourceNotFoundException": { "type": "structure", "members": { @@ -1385,7 +1630,7 @@ } }, "traits": { - "smithy.api#documentation": "

Request would cause a service quota to be exceeded. The limit is 10 concurrent operations.

", + "smithy.api#documentation": "

Request would cause a service quota to be exceeded. The limit is 10 concurrent operations.

", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -1400,6 +1645,12 @@ "smithy.api#pattern": "^arn:aws[0-9a-zA-Z_\\-:\\/]+$" } }, + "com.amazonaws.controltower#TargetRegions": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#Region" + } + }, "com.amazonaws.controltower#ThrottlingException": { "type": "structure", "members": { diff --git a/models/customer-profiles.json b/models/customer-profiles.json index 5cff01d9a2..fde199ea75 100644 --- a/models/customer-profiles.json +++ b/models/customer-profiles.json @@ -600,6 +600,9 @@ "type": "list", "member": { "target": "com.amazonaws.customerprofiles#ListCalculatedAttributeDefinitionItem" + }, + "traits": { + "smithy.api#sensitive": {} } }, "com.amazonaws.customerprofiles#CalculatedAttributesForProfileList": { @@ -796,7 +799,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The description of the calculated attribute.

" } @@ -848,7 +851,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The description of the calculated attribute.

" } @@ -3788,7 +3791,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The description of the calculated attribute.

" } @@ -5498,7 +5501,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The threshold for the calculated attribute.

" } @@ -9776,7 +9779,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The description of the calculated attribute.

" } @@ -9808,7 +9811,7 @@ } }, "Description": { - "target": "com.amazonaws.customerprofiles#text", + "target": "com.amazonaws.customerprofiles#sensitiveText", "traits": { "smithy.api#documentation": "

The description of the calculated attribute.

" } diff --git a/models/datazone.json b/models/datazone.json new file mode 100644 index 0000000000..a5b24245e0 --- /dev/null +++ b/models/datazone.json @@ -0,0 +1,20850 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.datazone#AcceptChoice": { + "type": "structure", + "members": { + "predictionTarget": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the target (for example, a column name) where a prediction can be\n accepted.

" + } + }, + "predictionChoice": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Specifies the prediction (aka, the automatically generated piece of metadata) that can\n be accepted.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the prediction (aka, the automatically generated piece of metadata) and the\n target (for example, a column name) that can be accepted.

" + } + }, + "com.amazonaws.datazone#AcceptChoices": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AcceptChoice" + } + }, + "com.amazonaws.datazone#AcceptPredictions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#AcceptPredictionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#AcceptPredictionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Accepts automatically generated business-friendly metadata for your Amazon DataZone\n assets.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}/accept-predictions" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#AcceptPredictionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpQuery": "revision" + } + }, + "acceptRule": { + "target": "com.amazonaws.datazone#AcceptRule", + "traits": { + "smithy.api#documentation": "

Specifies the rule (or the conditions) under which a prediction can be accepted.

" + } + }, + "acceptChoices": { + "target": "com.amazonaws.datazone#AcceptChoices", + "traits": { + "smithy.api#documentation": "

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure idempotency of the request. This field is\n automatically populated if not provided.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#AcceptPredictionsOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#AcceptRule": { + "type": "structure", + "members": { + "rule": { + "target": "com.amazonaws.datazone#AcceptRuleBehavior", + "traits": { + "smithy.api#documentation": "

Specifies whether you want to accept the top prediction for all targets or none.

" + } + }, + "threshold": { + "target": "smithy.api#Float", + "traits": { + "smithy.api#documentation": "

The confidence score that specifies the condition at which a prediction can be\n accepted.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the rule and the threshold under which a prediction can be accepted.

" + } + }, + "com.amazonaws.datazone#AcceptRuleBehavior": { + "type": "enum", + "members": { + "ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.datazone#AcceptSubscriptionRequest": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#AcceptSubscriptionRequestInput" + }, + "output": { + "target": "com.amazonaws.datazone#AcceptSubscriptionRequestOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Accepts a subscription request to a specific asset.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/accept" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#AcceptSubscriptionRequestInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone domain where the specified subscription request is being accepted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the subscription request that is to be accepted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

A description that specifies the reason for accepting the specified subscription\n request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#AcceptSubscriptionRequestOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon DataZone user that accepted the specified subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone domain where the specified subscription request\n was accepted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

Specifies the status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp that specifies when the subscription request was accepted.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

Specifies the timestamp when subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

Specifies the reason for requesting a subscription to the asset.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon DataZone users who are subscribed to the asset specified in the\n subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

Specifies the asset for which the subscription request was created.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the ID of the Amazon DataZone user who reviewed the subscription request.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

Specifies the reason for accepting the subscription request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.datazone#ActionLink": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#ApplicableAssetTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#TypeName" + } + }, + "com.amazonaws.datazone#Asset": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#AssetId" + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "revision": { + "target": "com.amazonaws.datazone#Revision" + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier" + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails" + }, + "name": { + "target": "com.amazonaws.datazone#AssetName" + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier" + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision" + }, + "description": { + "target": "com.amazonaws.datazone#Description" + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms" + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList" + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList" + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList" + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateAsset" + }, + "read": { + "target": "com.amazonaws.datazone#GetAsset" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteAsset" + }, + "operations": [ + { + "target": "com.amazonaws.datazone#CreateAssetRevision" + } + ] + }, + "com.amazonaws.datazone#AssetId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#AssetIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#AssetItem": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the inventory asset exists.

", + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone inventory asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone inventory asset.

", + "smithy.api#required": {} + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the asset type of the specified Amazon DataZone inventory asset.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the inventory asset type.

", + "smithy.api#required": {} + } + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier", + "traits": { + "smithy.api#documentation": "

The external identifier of the Amazon DataZone inventory asset.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of an Amazon DataZone inventory asset.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Amazon DataZone inventory asset was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the inventory asset.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the first revision of the inventory asset was created.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the first revision of the inventory asset.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms attached to the Amazon DataZone inventory asset.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project that owns the inventory asset.

", + "smithy.api#required": {} + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#AssetItemAdditionalAttributes", + "traits": { + "smithy.api#documentation": "

The additional attributes of an Amazon DataZone inventory asset.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Amazon DataZone inventory asset.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.datazone#Asset" + } + ] + } + }, + "com.amazonaws.datazone#AssetItemAdditionalAttributes": { + "type": "structure", + "members": { + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The forms included in the additional attributes of an inventory asset.

" + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The read-only forms included in the additional attributes of an inventory asset.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The additional attributes of an inventory asset.

" + } + }, + "com.amazonaws.datazone#AssetListing": { + "type": "structure", + "members": { + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of an asset published in an Amazon DataZone catalog.

" + } + }, + "assetRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of an asset published in an Amazon DataZone catalog.

" + } + }, + "assetType": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The type of an asset published in an Amazon DataZone catalog.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when an asset published in an Amazon DataZone catalog was created.

" + } + }, + "forms": { + "target": "com.amazonaws.datazone#Forms", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to an asset published in an Amazon DataZone catalog.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms attached to an asset published in an Amazon DataZone catalog.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project where an asset published in an Amazon DataZone catalog exists.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An asset published in an Amazon DataZone catalog.

" + } + }, + "com.amazonaws.datazone#AssetListingDetails": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of an asset published in an Amazon DataZone catalog.

", + "smithy.api#required": {} + } + }, + "listingStatus": { + "target": "com.amazonaws.datazone#ListingStatus", + "traits": { + "smithy.api#documentation": "

The status of an asset published in an Amazon DataZone catalog.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an asset published in an Amazon DataZone catalog.

" + } + }, + "com.amazonaws.datazone#AssetListingItem": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the listing (asset published in Amazon DataZone catalog).

" + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the listing (asset published in Amazon DataZone catalog).

" + } + }, + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The name of the inventory asset.

" + } + }, + "entityId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the inventory asset.

" + } + }, + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the inventory asset.

" + } + }, + "entityType": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The type of the inventory asset.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of an asset published in an Amazon DataZone catalog.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when an asset published in an Amazon DataZone catalog was created.

" + } + }, + "listingCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the listing.

" + } + }, + "listingUpdatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the listing.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

Glossary terms attached to the inventory asset.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that owns the inventory asset.

" + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#AssetListingItemAdditionalAttributes", + "traits": { + "smithy.api#documentation": "

The additional attributes of an asset published in an Amazon DataZone catalog.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an asset published in an Amazon DataZone catalog.

" + } + }, + "com.amazonaws.datazone#AssetListingItemAdditionalAttributes": { + "type": "structure", + "members": { + "forms": { + "target": "com.amazonaws.datazone#Forms", + "traits": { + "smithy.api#documentation": "

The metadata forms that form additional attributes of the metadata asset.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional attributes of an inventory asset.

" + } + }, + "com.amazonaws.datazone#AssetName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#AssetRevision": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where the inventory asset exists.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the inventory asset revision.

" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision details of the inventory asset.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the asset revision.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when an inventory asset revision was created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The revision of an inventory asset.

" + } + }, + "com.amazonaws.datazone#AssetRevisions": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetRevision" + } + }, + "com.amazonaws.datazone#AssetTargetNameMap": { + "type": "structure", + "members": { + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the inventory asset.

", + "smithy.api#required": {} + } + }, + "targetName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The target name in the asset target name map.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

" + } + }, + "com.amazonaws.datazone#AssetTargetNames": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AssetTargetNameMap" + } + }, + "com.amazonaws.datazone#AssetType": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier" + }, + "revision": { + "target": "com.amazonaws.datazone#Revision" + } + }, + "properties": { + "name": { + "target": "com.amazonaws.datazone#TypeName" + }, + "description": { + "target": "com.amazonaws.datazone#Description" + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormsInputMap" + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt" + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateAssetType" + }, + "collectionOperations": [ + { + "target": "com.amazonaws.datazone#DeleteAssetType" + }, + { + "target": "com.amazonaws.datazone#GetAssetType" + } + ] + }, + "com.amazonaws.datazone#AssetTypeIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 385 + }, + "smithy.api#pattern": "^(?!\\.)[\\w\\.]*\\w$" + } + }, + "com.amazonaws.datazone#AssetTypeItem": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where the asset type exists.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The name of the asset type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset type.

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap", + "traits": { + "smithy.api#documentation": "

The forms included in the details of the asset type.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project that owns the asset type.

", + "smithy.api#required": {} + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where the asset type was originally\n created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project where the asset type exists.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset type was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the asset type.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset type was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the asset type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the asset type.

" + } + }, + "com.amazonaws.datazone#Attribute": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.datazone#AuthType": { + "type": "enum", + "members": { + "IAM_IDC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM_IDC" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.datazone#AuthorizedPrincipalIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9:/_-]*$" + } + }, + "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifier" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.datazone#AwsAccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\d{12}$" + } + }, + "com.amazonaws.datazone#AwsRegion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-z]{2}-[a-z]{4,10}-\\d$" + } + }, + "com.amazonaws.datazone#BusinessNameGenerationConfiguration": { + "type": "structure", + "members": { + "enabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the business name generation is enabled.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the business name generation.

" + } + }, + "com.amazonaws.datazone#CancelSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CancelSubscriptionInput" + }, + "output": { + "target": "com.amazonaws.datazone#CancelSubscriptionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Cancels the subscription to the specified asset.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/cancel" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CancelSubscriptionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone domain where the subscription is being\n cancelled.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the subscription that is being cancelled.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CancelSubscriptionOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon DataZone user who is cancelling the subscription.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user that cancelled the subscription.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone domain where the subscription is being\n cancelled.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the request to cancel the subscription.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp that specifies when the request to cancel the subscription was\n created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp that specifies when the subscription was cancelled.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who is made a subscriber to the specified asset by the subscription\n that is being cancelled.

", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "

The asset to which a subscription is being cancelled.

", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The unique ID of the subscription request for the subscription that is being\n cancelled.

" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the permissions to the asset are retained after the subscription is\n cancelled.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ChangeAction": { + "type": "enum", + "members": { + "PUBLISH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISH" + } + }, + "UNPUBLISH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNPUBLISH" + } + } + } + }, + "com.amazonaws.datazone#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\x21-\\x7E]+$" + } + }, + "com.amazonaws.datazone#CloudFormationProperties": { + "type": "structure", + "members": { + "templateUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The template URL of the cloud formation provisioning properties of the environment\n blueprint.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Part of the provisioning properties of the environment blueprint.

" + } + }, + "com.amazonaws.datazone#ConfigurableActionParameter": { + "type": "structure", + "members": { + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key of the configurable action parameter.

" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of the configurable action parameter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the parameters for the configurable environment action.

" + } + }, + "com.amazonaws.datazone#ConfigurableActionParameterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ConfigurableActionParameter" + } + }, + "com.amazonaws.datazone#ConfigurableActionTypeAuthorization": { + "type": "enum", + "members": { + "IAM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM" + } + }, + "HTTPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HTTPS" + } + } + } + }, + "com.amazonaws.datazone#ConfigurableEnvironmentAction": { + "type": "structure", + "members": { + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of a configurable action in an Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "auth": { + "target": "com.amazonaws.datazone#ConfigurableActionTypeAuthorization", + "traits": { + "smithy.api#documentation": "

The authentication type of a configurable action of an Amazon DataZone environment.

" + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ConfigurableActionParameterList", + "traits": { + "smithy.api#documentation": "

The parameters of a configurable action in an Amazon DataZone environment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configurable action of an Amazon DataZone environment.

" + } + }, + "com.amazonaws.datazone#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

There is a conflict while performing this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.datazone#CreateAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateAssetInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateAssetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an asset in Amazon DataZone catalog.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/assets" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateAssetInput": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

Asset name.

", + "smithy.api#required": {} + } + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

Amazon DataZone domain where the asset is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier", + "traits": { + "smithy.api#documentation": "

" + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of this asset's type.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of this asset's type.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

Asset description.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

Glossary terms attached to the asset.

" + } + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "

Metadata forms attached to the asset.

" + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the project that owns this asset.

", + "smithy.api#required": {} + } + }, + "predictionConfiguration": { + "target": "com.amazonaws.datazone#PredictionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the automatically generated business-friendly metadata for the\n asset.

", + "smithy.api#notProperty": {} + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateAssetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the created asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The name of the created asset.

", + "smithy.api#required": {} + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the created asset type.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision type of the asset.

", + "smithy.api#required": {} + } + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier", + "traits": { + "smithy.api#documentation": "

" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the created asset.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user that created this asset in the catalog.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the first revision of the asset took place.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user that made the first revision of the asset.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that are attached to the created asset.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns the created asset.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset was created.

", + "smithy.api#required": {} + } + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails", + "traits": { + "smithy.api#documentation": "

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms that are attached to the created asset.

", + "smithy.api#required": {} + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The read-only metadata forms that are attached to the created asset.

" + } + }, + "predictionConfiguration": { + "target": "com.amazonaws.datazone#PredictionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the automatically generated business-friendly metadata for the\n asset.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateAssetRevision": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateAssetRevisionInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateAssetRevisionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a revision of the asset.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateAssetRevisionInput": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The revised name of the asset.

", + "smithy.api#required": {} + } + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the domain where the asset is being revised.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision type of the asset.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The revised description of the asset.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms to be attached to the asset as part of asset revision.

" + } + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "

The metadata forms to be attached to the asset as part of asset revision.

" + } + }, + "predictionConfiguration": { + "target": "com.amazonaws.datazone#PredictionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the automatically generated business-friendly metadata for the\n asset.

", + "smithy.api#notProperty": {} + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateAssetRevisionOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the asset revision.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The revised name of the asset.

", + "smithy.api#required": {} + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the revision type.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision type of the asset.

", + "smithy.api#required": {} + } + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier", + "traits": { + "smithy.api#documentation": "

" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The revised asset description.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset revision occurred.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who performed the asset revision.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the first asset revision occurred.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who performed the first asset revision.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that were attached to the asset as part of asset revision.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the project that owns the revised asset.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone domain where the asset was revised.

", + "smithy.api#required": {} + } + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails", + "traits": { + "smithy.api#documentation": "

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms that were attached to the asset as part of the asset revision.

", + "smithy.api#required": {} + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The read-only metadata forms that were attached to the asset as part of the asset\n revision.

" + } + }, + "predictionConfiguration": { + "target": "com.amazonaws.datazone#PredictionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the automatically generated business-friendly metadata for the\n asset.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateAssetType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateAssetTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateAssetTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a custom asset type.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/asset-types" + } + } + }, + "com.amazonaws.datazone#CreateAssetTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone domain where the custom asset type is being\n created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The name of the custom asset type.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the custom asset type.

" + } + }, + "formsInput": { + "target": "com.amazonaws.datazone#FormsInputMap", + "traits": { + "smithy.api#documentation": "

The metadata forms that are to be attached to the custom asset type.

", + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project that is to own the custom asset type.

", + "smithy.api#property": { + "name": "owningProjectId" + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateAssetTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type was created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The name of the asset type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the custom asset type.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the custom asset type.

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap", + "traits": { + "smithy.api#documentation": "

The metadata forms that are attached to the asset type.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project that currently owns this asset type.

" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the asset type was originally created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project where the asset type was originally created.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset type was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this custom asset type.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the custom asset type was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user that updated the custom asset type.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateDataSource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateDataSourceInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateDataSourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone data source.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/data-sources" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateDataSourceInput": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source.

" + } + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the data source is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which you want to add this data\n source.

", + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type of the data source.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationInput", + "traits": { + "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", + "smithy.api#notProperty": {} + } + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", + "traits": { + "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

Specifies whether the data source is enabled.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule of the data source runs.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are also\n automatically published to the catalog.

" + } + }, + "assetFormsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "

The metadata forms that are to be attached to the assets that this data source works\n with.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateDataSourceOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type of the data source.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source is created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project to which the data source is added.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the Amazon DataZone environment to which the data source publishes\n assets.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "traits": { + "smithy.api#documentation": "

Specifies the configuration of the data source. It can be set to either\n glueRunConfiguration or redshiftRunConfiguration.

", + "smithy.api#notProperty": {} + } + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", + "traits": { + "smithy.api#documentation": "

Specifies whether the business name generation is to be enabled for this data\n source.

" + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

Specifies whether the data source is enabled.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are also\n automatically published to the catalog.

" + } + }, + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to the assets that this data source creates.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule of the data source runs.

" + } + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the last run of this data source.

" + } + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp that specifies when the data source was last run.

" + } + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateDomain": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateDomainInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateDomainOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone domain.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#CreateDomainInput": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" + } + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", + "traits": { + "smithy.api#documentation": "

The single sign-on configuration of the Amazon DataZone domain.

" + } + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" + } + }, + "tags": { + "target": "com.amazonaws.datazone#Tags", + "traits": { + "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateDomainOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone domain.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" + } + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", + "traits": { + "smithy.api#documentation": "

The single sign-on configuration of the Amazon DataZone domain.

" + } + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The domain execution role that is created when an Amazon DataZone domain is created. The\n domain execution role is created in the Amazon Web Services account that houses the\n Amazon DataZone domain.

" + } + }, + "arn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the Amazon DataZone domain.

" + } + }, + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#DomainStatus", + "traits": { + "smithy.api#documentation": "

The status of the Amazon DataZone domain.

" + } + }, + "portalUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The URL of the data portal for this Amazon DataZone domain.

" + } + }, + "tags": { + "target": "com.amazonaws.datazone#Tags", + "traits": { + "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateEnvironment": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateEnvironmentInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateEnvironmentOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone environment.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments" + } + } + }, + "com.amazonaws.datazone#CreateEnvironmentInput": { + "type": "structure", + "members": { + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project in which this environment is created.

", + "smithy.api#required": {} + } + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone environment.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "environmentProfileIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile that is used to create this Amazon DataZone\n environment.

", + "smithy.api#required": {} + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateEnvironmentOutput": { + "type": "structure", + "members": { + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment is created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone environment.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment is created.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this environment.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "

The name of this environment.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone environment.

" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the environment profile with which this Amazon DataZone environment was\n created.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region in which the Amazon DataZone environment is created.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of this Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "provisionedResources": { + "target": "com.amazonaws.datazone#ResourceList", + "traits": { + "smithy.api#documentation": "

The provisioned resources of this Amazon DataZone environment.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#EnvironmentStatus", + "traits": { + "smithy.api#documentation": "

The status of this Amazon DataZone environment.

" + } + }, + "environmentActions": { + "target": "com.amazonaws.datazone#EnvironmentActionList", + "traits": { + "smithy.api#documentation": "

The configurable actions of this Amazon DataZone environment.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + } + }, + "lastDeployment": { + "target": "com.amazonaws.datazone#Deployment", + "traits": { + "smithy.api#documentation": "

The details of the last deployment of this Amazon DataZone environment.

" + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone environment.

" + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties of this Amazon DataZone environment.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which this Amazon DataZone environment was created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateEnvironmentProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateEnvironmentProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateEnvironmentProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone environment profile.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environment-profiles" + } + } + }, + "com.amazonaws.datazone#CreateEnvironmentProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which to create the environment profile.

", + "smithy.api#required": {} + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account in which the Amazon DataZone environment is created.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region in which this environment profile is created.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateEnvironmentProfileOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone environment profile.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile is created.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID in which this Amazon DataZone environment profile is\n created.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region in which this Amazon DataZone environment profile is\n created.

" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this environment profile.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone environment profile.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone environment profile.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment profile is created.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment profile.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateFormType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateFormTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateFormTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a metadata form type.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/form-types" + } + } + }, + "com.amazonaws.datazone#CreateFormTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} + } + }, + "model": { + "target": "com.amazonaws.datazone#Model", + "traits": { + "smithy.api#documentation": "

The model of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns this metadata form type.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus", + "traits": { + "smithy.api#documentation": "

The status of this Amazon DataZone metadata form type.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateFormTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type is created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of this Amazon DataZone metadata form type.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone metadata form type.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns this Amazon DataZone metadata form type.

" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type was originally\n created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which this Amazon DataZone metadata form type was originally\n created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateGlossary": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateGlossaryInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateGlossaryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone business glossary.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/glossaries" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateGlossaryInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name of this business glossary.

", + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that currently owns this business glossary.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description of this business glossary.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary.

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateGlossaryOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary is created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of this business glossary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name of this business glossary.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that currently owns this business glossary.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description of this business glossary.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateGlossaryTerm": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateGlossaryTermInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateGlossaryTermOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a business glossary term.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/glossary-terms" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#CreateGlossaryTermInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "glossaryIdentifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of this business glossary term.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary term.

" + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of this business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of this business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations of this business glossary term.

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateGlossaryTermOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of this business glossary term.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term is created.

", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary in which this term is created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of this business glossary term.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status of this business glossary term.

", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of this business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of this business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations of this business glossary term.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateGroupProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateGroupProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateGroupProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a group profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/group-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#CreateGroupProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "groupIdentifier": { + "target": "com.amazonaws.datazone#GroupIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the group for which the group profile is created.

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateGroupProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile is created.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#GroupProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the group profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the group profile.

" + } + }, + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", + "traits": { + "smithy.api#documentation": "

The name of the group for which the group profile is created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateListingChangeSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateListingChangeSetInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateListingChangeSetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/listings/change-set" + } + } + }, + "com.amazonaws.datazone#CreateListingChangeSetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityIdentifier": { + "target": "com.amazonaws.datazone#EntityId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "entityType": { + "target": "com.amazonaws.datazone#EntityType", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

" + } + }, + "action": { + "target": "com.amazonaws.datazone#ChangeAction", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateListingChangeSetOutput": { + "type": "structure", + "members": { + "listingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#ListingStatus", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateProjectInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateProjectOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon DataZone project.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/projects" + } + } + }, + "com.amazonaws.datazone#CreateProjectInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this project is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone project.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone project.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone project.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateProjectMembership": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateProjectMembershipInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateProjectMembershipOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a project membership in Amazon DataZone.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/createMembership" + } + } + }, + "com.amazonaws.datazone#CreateProjectMembershipInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the project membership is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project for which this project membership was created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "member": { + "target": "com.amazonaws.datazone#Member", + "traits": { + "smithy.api#documentation": "

The project member whose project membership was created.

", + "smithy.api#required": {} + } + }, + "designation": { + "target": "com.amazonaws.datazone#UserDesignation", + "traits": { + "smithy.api#documentation": "

The designation of the project membership.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateProjectMembershipOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateProjectOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the project was created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the project.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the project.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "lastUpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was last updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms that can be used in the project.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionGrant": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateSubscriptionGrantInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateSubscriptionGrantOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a subscription grant in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/subscription-grants" + } + } + }, + "com.amazonaws.datazone#CreateSubscriptionGrantInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetIdentifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntityInput", + "traits": { + "smithy.api#documentation": "

The entity to which the subscription is to be granted.

", + "smithy.api#required": {} + } + }, + "assetTargetNames": { + "target": "com.amazonaws.datazone#AssetTargetNames", + "traits": { + "smithy.api#documentation": "

The names of the assets for which the subscription grant is created.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionGrantOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription grant.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

A timestamp of when the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

A timestamp of when the subscription grant was updated.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", + "traits": { + "smithy.api#documentation": "

The entity to which the subscription is granted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription grant.

", + "smithy.api#required": {} + } + }, + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", + "traits": { + "smithy.api#documentation": "

The assets for which the subscription grant is created.

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionRequest": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateSubscriptionRequestInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateSubscriptionRequestOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a subscription request in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + } + } + }, + "com.amazonaws.datazone#CreateSubscriptionRequestInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipalInputs", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone principals for whom the subscription request is created.

", + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListingInputs", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionRequestOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is created.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

A timestamp of when the subscription request is created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The subscribed principals of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the reviewer of the subscription request.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the subscription request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateSubscriptionTargetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a subscription target in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + } + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the subscription target is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals of the subscription target.

", + "smithy.api#required": {} + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role that is used to create the subscription target.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateSubscriptionTargetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals of the subscription target.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role with which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types that can be included in the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreateUserProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateUserProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateUserProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a user profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/user-profiles" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#CreateUserProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "com.amazonaws.datazone#UserIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the user for which the user profile is created.

", + "smithy.api#required": {} + } + }, + "userType": { + "target": "com.amazonaws.datazone#UserType", + "traits": { + "smithy.api#documentation": "

The user type of the user for which the user profile is created.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateUserProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is created.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the user profile.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile.

" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#CreatedAt": { + "type": "timestamp" + }, + "com.amazonaws.datazone#CreatedBy": { + "type": "string" + }, + "com.amazonaws.datazone#CronString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "cron\\((\\b[0-5]?[0-9]\\b) (\\b2[0-3]\\b|\\b[0-1]?[0-9]\\b) (.*){1,5} (.*){1,5} (.*){1,5} (.*){1,5}\\)" + } + }, + "com.amazonaws.datazone#CustomParameter": { + "type": "structure", + "members": { + "keyName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key name of the parameter.

", + "smithy.api#pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the parameter.

" + } + }, + "fieldType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The field type of the parameter.

", + "smithy.api#required": {} + } + }, + "defaultValue": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The default value of the parameter.

" + } + }, + "isEditable": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the parameter is editable.

" + } + }, + "isOptional": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the custom parameter is optional.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of user parameters of an environment blueprint.

" + } + }, + "com.amazonaws.datazone#CustomParameterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#CustomParameter" + } + }, + "com.amazonaws.datazone#DataAssetActivityStatus": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "PUBLISHING_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHING_FAILED" + } + }, + "SUCCEEDED_CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_CREATED" + } + }, + "SUCCEEDED_UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED_UPDATED" + } + }, + "SKIPPED_ALREADY_IMPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ALREADY_IMPORTED" + } + }, + "SKIPPED_ARCHIVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_ARCHIVED" + } + }, + "SKIPPED_NO_ACCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED_NO_ACCESS" + } + }, + "UNCHANGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNCHANGED" + } + } + } + }, + "com.amazonaws.datazone#DataProductDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DataProductId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DataProductItem": { + "type": "structure", + "members": { + "itemId": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

" + } + } + }, + "traits": { + "smithy.api#documentation": "

" + } + }, + "com.amazonaws.datazone#DataProductItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataProductItem" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.datazone#DataProductName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DataProductSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#DataProductId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#DataProductName", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#DataProductDescription", + "traits": { + "smithy.api#documentation": "

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

" + } + }, + "dataProductItems": { + "target": "com.amazonaws.datazone#DataProductItems", + "traits": { + "smithy.api#documentation": "

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

" + } + } + }, + "traits": { + "smithy.api#documentation": "

" + } + }, + "com.amazonaws.datazone#DataSource": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#DataSourceId" + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#DataSourceId" + }, + "name": { + "target": "com.amazonaws.datazone#Name" + }, + "description": { + "target": "com.amazonaws.datazone#Description" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId" + }, + "projectIdentifier": { + "target": "smithy.api#String" + }, + "environmentIdentifier": { + "target": "smithy.api#String" + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType" + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus" + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting" + }, + "publishOnImport": { + "target": "smithy.api#Boolean" + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration" + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration" + }, + "assetFormsInput": { + "target": "com.amazonaws.datazone#FormInputList" + }, + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList" + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus" + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime" + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "lastRunAssetCount": { + "target": "smithy.api#Integer" + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateDataSource" + }, + "read": { + "target": "com.amazonaws.datazone#GetDataSource" + }, + "update": { + "target": "com.amazonaws.datazone#UpdateDataSource" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteDataSource" + }, + "list": { + "target": "com.amazonaws.datazone#ListDataSources" + } + }, + "com.amazonaws.datazone#DataSourceConfigurationInput": { + "type": "union", + "members": { + "glueRunConfiguration": { + "target": "com.amazonaws.datazone#GlueRunConfigurationInput", + "traits": { + "smithy.api#documentation": "

The configuration of the Amazon Web Services Glue data source.

" + } + }, + "redshiftRunConfiguration": { + "target": "com.amazonaws.datazone#RedshiftRunConfigurationInput", + "traits": { + "smithy.api#documentation": "

The configuration of the Amazon Redshift data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the data source.

" + } + }, + "com.amazonaws.datazone#DataSourceConfigurationOutput": { + "type": "union", + "members": { + "glueRunConfiguration": { + "target": "com.amazonaws.datazone#GlueRunConfigurationOutput", + "traits": { + "smithy.api#documentation": "

The configuration of the Amazon Web Services Glue data source.

" + } + }, + "redshiftRunConfiguration": { + "target": "com.amazonaws.datazone#RedshiftRunConfigurationOutput", + "traits": { + "smithy.api#documentation": "

The configuration of the Amazon Redshift data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the data source.

" + } + }, + "com.amazonaws.datazone#DataSourceErrorMessage": { + "type": "structure", + "members": { + "errorType": { + "target": "com.amazonaws.datazone#DataSourceErrorType", + "traits": { + "smithy.api#documentation": "

The type of the error message that is returned if the operation cannot be successfully\n completed.

", + "smithy.api#required": {} + } + }, + "errorDetail": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The details of the error message that is returned if the operation cannot be\n successfully completed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the error message that is returned if the operation cannot be\n successfully completed.

" + } + }, + "com.amazonaws.datazone#DataSourceErrorType": { + "type": "enum", + "members": { + "ACCESS_DENIED_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCESS_DENIED_EXCEPTION" + } + }, + "CONFLICT_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONFLICT_EXCEPTION" + } + }, + "INTERNAL_SERVER_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_SERVER_EXCEPTION" + } + }, + "RESOURCE_NOT_FOUND_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE_NOT_FOUND_EXCEPTION" + } + }, + "SERVICE_QUOTA_EXCEEDED_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVICE_QUOTA_EXCEEDED_EXCEPTION" + } + }, + "THROTTLING_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THROTTLING_EXCEPTION" + } + }, + "VALIDATION_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALIDATION_EXCEPTION" + } + } + } + }, + "com.amazonaws.datazone#DataSourceId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DataSourceRun": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#DataSourceRunId" + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#DataSourceRunId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "dataSourceIdentifier": { + "target": "com.amazonaws.datazone#DataSourceId" + }, + "dataSourceId": { + "target": "com.amazonaws.datazone#DataSourceId" + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceRunType" + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceRunStatus" + }, + "dataSourceConfigurationSnapshot": { + "target": "smithy.api#String" + }, + "runStatisticsForAssets": { + "target": "com.amazonaws.datazone#RunStatisticsForAssets" + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime" + }, + "startedAt": { + "target": "com.amazonaws.datazone#DateTime" + }, + "stoppedAt": { + "target": "com.amazonaws.datazone#DateTime" + } + }, + "create": { + "target": "com.amazonaws.datazone#StartDataSourceRun" + }, + "read": { + "target": "com.amazonaws.datazone#GetDataSourceRun" + }, + "list": { + "target": "com.amazonaws.datazone#ListDataSourceRuns" + } + }, + "com.amazonaws.datazone#DataSourceRunActivities": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataSourceRunActivity" + } + }, + "com.amazonaws.datazone#DataSourceRunActivity": { + "type": "structure", + "members": { + "database": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The database included in the data source run activity.

", + "smithy.api#required": {} + } + }, + "dataSourceRunId": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source run for the data source run activity.

", + "smithy.api#required": {} + } + }, + "technicalName": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The technical name included in the data source run activity.

", + "smithy.api#required": {} + } + }, + "dataAssetStatus": { + "target": "com.amazonaws.datazone#DataAssetActivityStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset included in the data source run activity.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The project ID included in the data source run activity.

", + "smithy.api#required": {} + } + }, + "dataAssetId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the asset included in the data source run activity.

" + } + }, + "technicalDescription": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The technical description included in the data source run activity.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run activity was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run activity was updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The activity details of the data source run.

" + } + }, + "com.amazonaws.datazone#DataSourceRunId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DataSourceRunStatus": { + "type": "enum", + "members": { + "REQUESTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REQUESTED" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "PARTIALLY_SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PARTIALLY_SUCCEEDED" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESS" + } + } + } + }, + "com.amazonaws.datazone#DataSourceRunSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataSourceRunSummary" + } + }, + "com.amazonaws.datazone#DataSourceRunSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source run.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source of the data source run.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceRunType", + "traits": { + "smithy.api#documentation": "

The type of the data source run.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source run.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The project ID of the data source run.

", + "smithy.api#required": {} + } + }, + "runStatisticsForAssets": { + "target": "com.amazonaws.datazone#RunStatisticsForAssets" + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when a data source run was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when a data source run was updated.

", + "smithy.api#required": {} + } + }, + "startedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when a data source run was started.

" + } + }, + "stoppedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when a data source run was stopped.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a data source run.

" + } + }, + "com.amazonaws.datazone#DataSourceRunType": { + "type": "enum", + "members": { + "PRIORITIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRIORITIZED" + } + }, + "SCHEDULED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCHEDULED" + } + } + } + }, + "com.amazonaws.datazone#DataSourceStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "FAILED_CREATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_CREATION" + } + }, + "READY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "READY" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATING" + } + }, + "FAILED_UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_UPDATE" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "FAILED_DELETION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_DELETION" + } + } + } + }, + "com.amazonaws.datazone#DataSourceSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DataSourceSummary" + } + }, + "com.amazonaws.datazone#DataSourceSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source exists.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the data source exists.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The ID of the data source.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the data source.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

", + "smithy.api#required": {} + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

Specifies whether the data source is enabled.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration" + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the last data source run.

" + } + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was last performed.

" + } + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage" + }, + "lastRunAssetCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The count of the assets created during the last data source run.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the data source.

" + } + }, + "com.amazonaws.datazone#DataSourceType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.datazone#DataZone": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.datazone#AcceptPredictions" + }, + { + "target": "com.amazonaws.datazone#AcceptSubscriptionRequest" + }, + { + "target": "com.amazonaws.datazone#CancelSubscription" + }, + { + "target": "com.amazonaws.datazone#CreateEnvironment" + }, + { + "target": "com.amazonaws.datazone#CreateEnvironmentProfile" + }, + { + "target": "com.amazonaws.datazone#CreateGroupProfile" + }, + { + "target": "com.amazonaws.datazone#CreateListingChangeSet" + }, + { + "target": "com.amazonaws.datazone#CreateProject" + }, + { + "target": "com.amazonaws.datazone#CreateProjectMembership" + }, + { + "target": "com.amazonaws.datazone#CreateSubscriptionGrant" + }, + { + "target": "com.amazonaws.datazone#CreateSubscriptionRequest" + }, + { + "target": "com.amazonaws.datazone#CreateSubscriptionTarget" + }, + { + "target": "com.amazonaws.datazone#CreateUserProfile" + }, + { + "target": "com.amazonaws.datazone#DeleteEnvironment" + }, + { + "target": "com.amazonaws.datazone#DeleteEnvironmentProfile" + }, + { + "target": "com.amazonaws.datazone#DeleteProject" + }, + { + "target": "com.amazonaws.datazone#DeleteProjectMembership" + }, + { + "target": "com.amazonaws.datazone#DeleteSubscriptionGrant" + }, + { + "target": "com.amazonaws.datazone#DeleteSubscriptionRequest" + }, + { + "target": "com.amazonaws.datazone#DeleteSubscriptionTarget" + }, + { + "target": "com.amazonaws.datazone#GetEnvironment" + }, + { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprint" + }, + { + "target": "com.amazonaws.datazone#GetEnvironmentProfile" + }, + { + "target": "com.amazonaws.datazone#GetGroupProfile" + }, + { + "target": "com.amazonaws.datazone#GetIamPortalLoginUrl" + }, + { + "target": "com.amazonaws.datazone#GetProject" + }, + { + "target": "com.amazonaws.datazone#GetSubscription" + }, + { + "target": "com.amazonaws.datazone#GetSubscriptionGrant" + }, + { + "target": "com.amazonaws.datazone#GetSubscriptionRequestDetails" + }, + { + "target": "com.amazonaws.datazone#GetSubscriptionTarget" + }, + { + "target": "com.amazonaws.datazone#GetUserProfile" + }, + { + "target": "com.amazonaws.datazone#ListAssetRevisions" + }, + { + "target": "com.amazonaws.datazone#ListDataSourceRunActivities" + }, + { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprints" + }, + { + "target": "com.amazonaws.datazone#ListEnvironmentProfiles" + }, + { + "target": "com.amazonaws.datazone#ListEnvironments" + }, + { + "target": "com.amazonaws.datazone#ListNotifications" + }, + { + "target": "com.amazonaws.datazone#ListProjectMemberships" + }, + { + "target": "com.amazonaws.datazone#ListProjects" + }, + { + "target": "com.amazonaws.datazone#ListSubscriptionGrants" + }, + { + "target": "com.amazonaws.datazone#ListSubscriptionRequests" + }, + { + "target": "com.amazonaws.datazone#ListSubscriptions" + }, + { + "target": "com.amazonaws.datazone#ListSubscriptionTargets" + }, + { + "target": "com.amazonaws.datazone#ListTagsForResource" + }, + { + "target": "com.amazonaws.datazone#RejectPredictions" + }, + { + "target": "com.amazonaws.datazone#RejectSubscriptionRequest" + }, + { + "target": "com.amazonaws.datazone#RevokeSubscription" + }, + { + "target": "com.amazonaws.datazone#Search" + }, + { + "target": "com.amazonaws.datazone#SearchGroupProfiles" + }, + { + "target": 
"com.amazonaws.datazone#SearchListings" + }, + { + "target": "com.amazonaws.datazone#SearchTypes" + }, + { + "target": "com.amazonaws.datazone#SearchUserProfiles" + }, + { + "target": "com.amazonaws.datazone#TagResource" + }, + { + "target": "com.amazonaws.datazone#UntagResource" + }, + { + "target": "com.amazonaws.datazone#UpdateEnvironment" + }, + { + "target": "com.amazonaws.datazone#UpdateEnvironmentProfile" + }, + { + "target": "com.amazonaws.datazone#UpdateGroupProfile" + }, + { + "target": "com.amazonaws.datazone#UpdateProject" + }, + { + "target": "com.amazonaws.datazone#UpdateSubscriptionGrantStatus" + }, + { + "target": "com.amazonaws.datazone#UpdateSubscriptionRequest" + }, + { + "target": "com.amazonaws.datazone#UpdateSubscriptionTarget" + }, + { + "target": "com.amazonaws.datazone#UpdateUserProfile" + } + ], + "resources": [ + { + "target": "com.amazonaws.datazone#Asset" + }, + { + "target": "com.amazonaws.datazone#AssetType" + }, + { + "target": "com.amazonaws.datazone#DataSource" + }, + { + "target": "com.amazonaws.datazone#DataSourceRun" + }, + { + "target": "com.amazonaws.datazone#Domain" + }, + { + "target": "com.amazonaws.datazone#EnvironmentBlueprintConfiguration" + }, + { + "target": "com.amazonaws.datazone#FormType" + }, + { + "target": "com.amazonaws.datazone#Glossary" + }, + { + "target": "com.amazonaws.datazone#GlossaryTerm" + }, + { + "target": "com.amazonaws.datazone#Listing" + } + ], + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#UnauthorizedException" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "DataZone", + "arnNamespace": "datazone", + "serviceName": "datazone", + "cloudTrailEventSource": "datazone.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "datazone" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "origin": "*", + "additionalAllowedHeaders": [ + "*", + "Authorization", + "Date", + "X-Amz-Date", + "X-Amz-Security-Token", + "X-Amz-Target", + "content-type", + "x-amz-content-sha256", + "x-amz-user-agent", + "x-amzn-platform-id", + "x-amzn-trace-id" + ], + "additionalExposedHeaders": [ + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-errormessage", + "x-amzn-trace-id", + "x-amzn-requestid", + "x-amz-apigw-id", + "date" + ], + "maxAge": 86400 + }, + "smithy.api#documentation": "

Amazon DataZone is a data management service that enables you to catalog, discover,\n govern, share, and analyze your data. With Amazon DataZone, you can share and access your\n data across accounts and supported regions. Amazon DataZone simplifies your experience\n across Amazon Web Services services, including, but not limited to, Amazon Redshift, Amazon\n Athena, Amazon Web Services Glue, and Amazon Web Services Lake Formation.

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#title": "Amazon DataZone", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://datazone-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://datazone.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://datazone-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://datazone.{Region}.{PartitionResult#dnsSuffix}", + "properties": 
{}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://datazone.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.datazone#DateTime": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + }, + "com.amazonaws.datazone#DecisionComment": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DeleteAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteAssetInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteAssetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": 
"com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an asset in Amazon DataZone.
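
The operation just described is a plain DELETE on /v2/domains/{domainIdentifier}/assets/{identifier} that returns HTTP 204. As a rough illustration of what calling it through the new SotoDataZone product looks like, here is a minimal sketch; the type and method names (DataZone, DataZone.DeleteAssetInput, deleteAsset) are assumed from Soto's usual code-generation conventions rather than copied from the generated sources, and the identifiers are placeholders.

import SotoDataZone

// Minimal sketch, assuming the generated SotoDataZone API follows Soto's usual
// pattern: a DataZone service object, a DataZone.DeleteAssetInput shape, and a
// deleteAsset method. Exact generated names and signatures may differ.
func deleteExampleAsset() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }

    let dataZone = DataZone(client: client, region: .useast1)

    // DELETE /v2/domains/{domainIdentifier}/assets/{identifier}; success is HTTP 204.
    let input = DataZone.DeleteAssetInput(
        domainIdentifier: "dzd_exampledomain",   // placeholder domain ID
        identifier: "example-asset-id"           // placeholder asset ID
    )
    _ = try await dataZone.deleteAsset(input)
}

FIPS or dual-stack endpoints, as described by the endpoint rule set earlier in this model, would normally be selected through the service configuration rather than by building datazone-fips URLs by hand.
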

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteAssetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the asset that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteAssetOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteAssetType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteAssetTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteAssetTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an asset type in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + } + } + }, + "com.amazonaws.datazone#DeleteAssetTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the asset type that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteAssetTypeOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteDataSource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteDataSourceInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteDataSourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a data source in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/data-sources/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteDataSourceInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.
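
Because clientToken carries the idempotencyToken trait, retrying DeleteDataSource with the same token is safe. A hedged sketch of passing an explicit token (same naming assumptions as the earlier sketch, placeholder IDs):

import Foundation
import SotoDataZone

// Sketch only: an explicit clientToken makes a retried DeleteDataSource request
// idempotent. Names are assumed from Soto's generation conventions and the IDs
// are placeholders; generated clients usually also default this member to a
// random idempotency token when it is omitted.
func deleteExampleDataSource(using dataZone: DataZone) async throws {
    let token = UUID().uuidString
    let output = try await dataZone.deleteDataSource(
        DataZone.DeleteDataSourceInput(
            clientToken: token,
            domainIdentifier: "dzd_exampledomain",
            identifier: "example-data-source-id"
        )
    )
    // The 200 response echoes details of the deleted data source.
    print("deleted data source status:", String(describing: output.status))
}
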

", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteDataSourceOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The ID of the data source that is deleted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of this data source.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type of this data source.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source that is deleted.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source that is deleted.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source is deleted.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which this data source exists and from which it's\n deleted.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment associated with this data source.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "traits": { + "smithy.api#documentation": "

The configuration of the data source that is deleted.

", + "smithy.api#notProperty": {} + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

The enable setting of the data source that specifies whether the data source is enabled\n or disabled.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" + } + }, + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The asset data forms associated with this data source.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule of runs for this data source.

" + } + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the last run of this data source.

" + } + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was last run.

" + } + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when this data source was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when this data source was updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteDomain": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteDomainInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteDomainOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon DataZone domain.
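
Unlike the 204 responses above, DeleteDomain answers with HTTP 202 and a DomainStatus, so domain deletion is asynchronous. A hedged sketch of reading that status (same naming assumptions as the earlier sketches, placeholder domain ID):

import SotoDataZone

// Sketch only: DeleteDomain responds with HTTP 202 plus a DomainStatus, i.e. the
// domain is removed asynchronously (DELETING, then DELETED or DELETION_FAILED).
// Names are assumed from Soto's generation conventions; the domain ID is a placeholder.
func deleteExampleDomain(using dataZone: DataZone) async throws {
    let output = try await dataZone.deleteDomain(
        DataZone.DeleteDomainInput(identifier: "dzd_exampledomain")
    )
    print("domain deletion status:", output.status)
}
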

", + "smithy.api#http": { + "code": 202, + "method": "DELETE", + "uri": "/v2/domains/{identifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#DeleteDomainInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain that is to be deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteDomainOutput": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.datazone#DomainStatus", + "traits": { + "smithy.api#documentation": "

The status of the domain.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironment": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteEnvironmentInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an environment in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environments/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the blueprint configuration in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the blueprint configuration is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint whose configuration is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment that is to be deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteEnvironmentProfileInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an environment profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment profile is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the environment profile that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteFormType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteFormTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteFormTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a metadata form type in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}" + } + } + }, + "com.amazonaws.datazone#DeleteFormTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the metadata form type is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "formTypeIdentifier": { + "target": "com.amazonaws.datazone#FormTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the metadata form type that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteFormTypeOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteGlossary": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteGlossaryInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteGlossaryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a business glossary in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/glossaries/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteGlossaryInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the business glossary is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteGlossaryOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteGlossaryTerm": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteGlossaryTermInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteGlossaryTermOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a business glossary term in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteGlossaryTermInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the business glossary term is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary term that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteGlossaryTermOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteListing": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteListingInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteListingOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/listings/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteListingInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteListingOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteProjectInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteProjectOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a project in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/projects/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteProjectInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the project is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that is to be deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteProjectMembership": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteProjectMembershipInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteProjectMembershipOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes project membership in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/deleteMembership" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteProjectMembershipInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where project membership is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project from which the project membership is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "member": { + "target": "com.amazonaws.datazone#Member", + "traits": { + "smithy.api#documentation": "

The project member whose project membership is deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteProjectMembershipOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteProjectOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteSubscriptionGrant": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteSubscriptionGrantInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteSubscriptionGrantOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a subscription grant in Amazon DataZone.
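
DeleteSubscriptionGrant is another 200-style deletion: the output echoes a snapshot of the grant that was removed, including its ID, overall status, and the assets it covered. A hedged sketch of reading those fields (same naming assumptions as the earlier sketches, placeholder IDs):

import SotoDataZone

// Sketch only: DeleteSubscriptionGrant returns HTTP 200 with a snapshot of the
// removed grant. Names are assumed from Soto's generation conventions; the IDs
// are placeholders.
func deleteExampleSubscriptionGrant(using dataZone: DataZone) async throws {
    let output = try await dataZone.deleteSubscriptionGrant(
        DataZone.DeleteSubscriptionGrantInput(
            domainIdentifier: "dzd_exampledomain",
            identifier: "example-grant-id"
        )
    )
    print("removed grant", output.id,
          "overall status", output.status,
          "covering", output.assets?.count ?? 0, "assets")
}
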

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}" + } + } + }, + "com.amazonaws.datazone#DeleteSubscriptionGrantInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the subscription grant is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription grant that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteSubscriptionGrantOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription grant that is deleted.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant that is deleted.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant that is deleted.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant is deleted.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant that is deleted was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant that is deleted was updated.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target associated with the subscription grant that is\n deleted.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", + "traits": { + "smithy.api#documentation": "

The entity for which the subscription grant is deleted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription grant that is deleted.

", + "smithy.api#required": {} + } + }, + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", + "traits": { + "smithy.api#documentation": "

The assets to which the deleted subscription grant gave access.

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription whose subscription grant is to be deleted.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#DeleteSubscriptionRequest": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteSubscriptionRequestInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a subscription request in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}" + } + } + }, + "com.amazonaws.datazone#DeleteSubscriptionRequestInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription request is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription request that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteSubscriptionTargetInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a subscription target in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}" + } + } + }, + "com.amazonaws.datazone#DeleteSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone environment in which the subscription target is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#Deployment": { + "type": "structure", + "members": { + "deploymentId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the last deployment of the environment.

" + } + }, + "deploymentType": { + "target": "com.amazonaws.datazone#DeploymentType", + "traits": { + "smithy.api#documentation": "

The type of the last deployment of the environment.

" + } + }, + "deploymentStatus": { + "target": "com.amazonaws.datazone#DeploymentStatus", + "traits": { + "smithy.api#documentation": "

The status of the last deployment of the environment.

" + } + }, + "failureReason": { + "target": "com.amazonaws.datazone#EnvironmentError", + "traits": { + "smithy.api#documentation": "

The failure reason of the last deployment of the environment.

" + } + }, + "messages": { + "target": "com.amazonaws.datazone#DeploymentMessagesList", + "traits": { + "smithy.api#documentation": "

The messages of the last deployment of the environment.

" + } + }, + "isDeploymentComplete": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the last deployment of the environment is complete.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the last deployment of the environment.

" + } + }, + "com.amazonaws.datazone#DeploymentMessage": { + "type": "string" + }, + "com.amazonaws.datazone#DeploymentMessagesList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DeploymentMessage" + } + }, + "com.amazonaws.datazone#DeploymentProperties": { + "type": "structure", + "members": { + "startTimeoutMinutes": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The start timeout of the environment blueprint deployment.

", + "smithy.api#range": { + "min": 1, + "max": 225 + } + } + }, + "endTimeoutMinutes": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The end timeout of the environment blueprint deployment.

", + "smithy.api#range": { + "min": 1, + "max": 225 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The deployment properties of the Amazon DataZone blueprint.

" + } + }, + "com.amazonaws.datazone#DeploymentStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SUCCESSFUL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESSFUL" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "PENDING_DEPLOYMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_DEPLOYMENT" + } + } + } + }, + "com.amazonaws.datazone#DeploymentType": { + "type": "enum", + "members": { + "CREATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE" + } + }, + "UPDATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE" + } + }, + "DELETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE" + } + } + } + }, + "com.amazonaws.datazone#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DetailedGlossaryTerm": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of a glossary term attached to the inventory asset.

" + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of a glossary term attached to the inventory asset.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of a glossary term attached to the inventory asset.

" + } + }, + "com.amazonaws.datazone#DetailedGlossaryTerms": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerm" + } + }, + "com.amazonaws.datazone#Domain": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#DomainId" + } + }, + "properties": { + "name": { + "target": "smithy.api#String" + }, + "description": { + "target": "smithy.api#String" + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn" + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn" + }, + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn" + }, + "tags": { + "target": "com.amazonaws.datazone#Tags" + }, + "id": { + "target": "com.amazonaws.datazone#DomainId" + }, + "status": { + "target": "com.amazonaws.datazone#DomainStatus" + }, + "arn": { + "target": "smithy.api#String" + }, + "portalUrl": { + "target": "smithy.api#String" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "lastUpdatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateDomain" + }, + "read": { + "target": "com.amazonaws.datazone#GetDomain" + }, + "update": { + "target": "com.amazonaws.datazone#UpdateDomain" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteDomain" + }, + "list": { + "target": "com.amazonaws.datazone#ListDomains" + } + }, + "com.amazonaws.datazone#DomainDescription": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DomainId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^dzd[-_][a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#DomainName": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#DomainStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVAILABLE" + } + }, + "CREATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATION_FAILED" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETED" + } + }, + "DELETION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETION_FAILED" + } + } + } + }, + "com.amazonaws.datazone#DomainSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#DomainSummary" + } + }, + "com.amazonaws.datazone#DomainSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#DomainName", + "traits": { + "smithy.api#documentation": "

A name of an Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#DomainDescription", + "traits": { + "smithy.api#documentation": "

A description of an Amazon DataZone domain.

" + } + }, + "arn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "managedAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services account that manages the domain.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DomainStatus", + "traits": { + "smithy.api#documentation": "

The status of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "portalUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data portal URL for the Amazon DataZone domain.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

A timestamp of when an Amazon DataZone domain was created.

", + "smithy.api#required": {} + } + }, + "lastUpdatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

A timestamp of when an Amazon DataZone domain was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of an Amazon DataZone domain.

" + } + }, + "com.amazonaws.datazone#EnableSetting": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.datazone#EnabledRegionList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#RegionName" + }, + "traits": { + "smithy.api#length": { + "min": 0 + } + } + }, + "com.amazonaws.datazone#EntityId": { + "type": "string" + }, + "com.amazonaws.datazone#EntityType": { + "type": "enum", + "members": { + "ASSET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSET" + } + } + } + }, + "com.amazonaws.datazone#EnvironmentActionList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ConfigurableEnvironmentAction" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintConfiguration": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId" + } + }, + "properties": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId" + }, + "provisioningRoleArn": { + "target": "com.amazonaws.datazone#RoleArn" + }, + "manageAccessRoleArn": { + "target": "com.amazonaws.datazone#RoleArn" + }, + "enabledRegions": { + "target": "com.amazonaws.datazone#EnabledRegionList" + }, + "regionalParameters": { + "target": "com.amazonaws.datazone#RegionalParameterMap" + }, + "createdAt": { + "target": "smithy.api#Timestamp" + }, + "updatedAt": { + "target": "smithy.api#Timestamp" + } + }, + "put": { + "target": "com.amazonaws.datazone#PutEnvironmentBlueprintConfiguration" + }, + "read": { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprintConfiguration" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfiguration" + }, + "list": { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurations" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintConfigurationItem": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which an environment blueprint exists.

", + "smithy.api#required": {} + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the provisioning role specified in the environment blueprint\n configuration.

" + } + }, + "manageAccessRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the manage access role specified in the environment blueprint\n configuration.

" + } + }, + "enabledRegions": { + "target": "com.amazonaws.datazone#EnabledRegionList", + "traits": { + "smithy.api#documentation": "

The enabled Amazon Web Services Regions specified in a blueprint configuration.

" + } + }, + "regionalParameters": { + "target": "com.amazonaws.datazone#RegionalParameterMap", + "traits": { + "smithy.api#documentation": "

The regional parameters of the environment blueprint.

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when an environment blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment blueprint was updated.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of an environment blueprint.

" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintConfigurationItem" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintSummary" + } + }, + "com.amazonaws.datazone#EnvironmentBlueprintSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the blueprint.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintName", + "traits": { + "smithy.api#documentation": "

The name of the blueprint.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of a blueprint.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of the blueprint.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when an environment blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the blueprint was enabled.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an environment blueprint summary.

" + } + }, + "com.amazonaws.datazone#EnvironmentError": { + "type": "structure", + "members": { + "code": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error code for the failure reason for the environment deployment.

" + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message for the failure reason for the environment deployment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The failure reasons for the environment deployment.

" + } + }, + "com.amazonaws.datazone#EnvironmentId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#EnvironmentName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#EnvironmentParameter": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of an environment profile parameter.

" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of an environment profile parameter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameter details of an environment profile.

" + } + }, + "com.amazonaws.datazone#EnvironmentParametersList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentParameter" + } + }, + "com.amazonaws.datazone#EnvironmentProfileId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#EnvironmentProfileName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#EnvironmentProfileSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentProfileSummary" + } + }, + "com.amazonaws.datazone#EnvironmentProfileSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment profile exists.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The identifier of an Amazon Web Services account in which an environment profile exists.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region in which an environment profile exists.

" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the environment profile.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when an environment profile was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment profile was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name of the environment profile.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the environment profile.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of a blueprint with which an environment profile is created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of a project in which an environment profile exists.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an environment profile.

" + } + }, + "com.amazonaws.datazone#EnvironmentStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATING" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "CREATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_FAILED" + } + }, + "UPDATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_FAILED" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_FAILED" + } + }, + "VALIDATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALIDATION_FAILED" + } + }, + "SUSPENDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUSPENDED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETED" + } + }, + "INACCESSIBLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACCESSIBLE" + } + } + } + }, + "com.amazonaws.datazone#EnvironmentSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentSummary" + } + }, + "com.amazonaws.datazone#EnvironmentSummary": { + "type": "structure", + "members": { + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which the environment exists.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment exists.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the environment.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "

The name of the environment.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the environment.

" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile with which the environment was created.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services account in which an environment exists.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region in which an environment exists.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the environment.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#EnvironmentStatus", + "traits": { + "smithy.api#documentation": "

The status of the environment.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an environment.

" + } + }, + "com.amazonaws.datazone#ErrorMessage": { + "type": "string" + }, + "com.amazonaws.datazone#ExternalIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FailureCause": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the error message.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "com.amazonaws.datazone#Filter": { + "type": "structure", + "members": { + "attribute": { + "target": "com.amazonaws.datazone#Attribute", + "traits": { + "smithy.api#documentation": "

A search filter attribute in Amazon DataZone.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A search filter value in Amazon DataZone.

", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A search filter in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#FilterClause": { + "type": "union", + "members": { + "filter": { + "target": "com.amazonaws.datazone#Filter", + "traits": { + "smithy.api#documentation": "

A search filter in Amazon DataZone.

" + } + }, + "and": { + "target": "com.amazonaws.datazone#FilterList", + "traits": { + "smithy.api#documentation": "

The 'and' search filter clause in Amazon DataZone.

" + } + }, + "or": { + "target": "com.amazonaws.datazone#FilterList", + "traits": { + "smithy.api#documentation": "

The 'or' search filter clause in Amazon DataZone.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A search filter clause in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#FilterExpression": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.datazone#FilterExpressionType", + "traits": { + "smithy.api#documentation": "

The search filter expression type.

", + "smithy.api#required": {} + } + }, + "expression": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The search filter expression.

", + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A filter expression in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#FilterExpressionType": { + "type": "enum", + "members": { + "INCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCLUDE" + } + }, + "EXCLUDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXCLUDE" + } + } + } + }, + "com.amazonaws.datazone#FilterExpressions": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#FilterExpression" + } + }, + "com.amazonaws.datazone#FilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#FilterClause" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.datazone#FirstName": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FormEntryInput": { + "type": "structure", + "members": { + "typeIdentifier": { + "target": "com.amazonaws.datazone#FormTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The type ID of the form entry.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The type revision of the form entry.

", + "smithy.api#required": {} + } + }, + "required": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether a form entry is required.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the form entry.

" + } + }, + "com.amazonaws.datazone#FormEntryOutput": { + "type": "structure", + "members": { + "typeName": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the type of the form entry.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The type revision of the form entry.

", + "smithy.api#required": {} + } + }, + "required": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether a form entry is required.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the form entry.

" + } + }, + "com.amazonaws.datazone#FormInput": { + "type": "structure", + "members": { + "formName": { + "target": "com.amazonaws.datazone#FormName", + "traits": { + "smithy.api#documentation": "

The name of the metadata form.

", + "smithy.api#required": {} + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#FormTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the metadata form type.

" + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the metadata form type.

" + } + }, + "content": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The content of the metadata form.

", + "smithy.api#length": { + "max": 75000 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a metadata form.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FormInputList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#FormInput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FormName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$" + } + }, + "com.amazonaws.datazone#FormOutput": { + "type": "structure", + "members": { + "formName": { + "target": "com.amazonaws.datazone#FormName", + "traits": { + "smithy.api#documentation": "

The name of the metadata form.

", + "smithy.api#required": {} + } + }, + "typeName": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the metadata form type.

" + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the metadata form type.

" + } + }, + "content": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The content of the metadata form.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a metadata form.

" + } + }, + "com.amazonaws.datazone#FormOutputList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#FormOutput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.datazone#FormType": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "formTypeIdentifier": { + "target": "com.amazonaws.datazone#FormTypeIdentifier" + }, + "revision": { + "target": "com.amazonaws.datazone#Revision" + } + }, + "properties": { + "model": { + "target": "com.amazonaws.datazone#Model" + }, + "name": { + "target": "com.amazonaws.datazone#FormTypeName" + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus" + }, + "description": { + "target": "com.amazonaws.datazone#Description" + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateFormType" + }, + "collectionOperations": [ + { + "target": "com.amazonaws.datazone#DeleteFormType" + }, + { + "target": "com.amazonaws.datazone#GetFormType" + } + ] + }, + "com.amazonaws.datazone#FormTypeData": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the form type exists.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the form type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the form type.

", + "smithy.api#required": {} + } + }, + "model": { + "target": "com.amazonaws.datazone#Model", + "traits": { + "smithy.api#documentation": "

The model of the form type.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus", + "traits": { + "smithy.api#documentation": "

The status of the form type.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that owns the form type.

" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the form type was originally\n created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which the form type was originally created.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the metadata form type was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the metadata form type.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the metadata form type.

" + } + }, + "imports": { + "target": "com.amazonaws.datazone#ImportList", + "traits": { + "smithy.api#documentation": "

The imports specified in the form type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the metadata form type.

" + } + }, + "com.amazonaws.datazone#FormTypeIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 385 + }, + "smithy.api#pattern": "^(?!\\.)[\\w\\.]*\\w$" + } + }, + "com.amazonaws.datazone#FormTypeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(amazon.datazone.)?(?![0-9_])\\w+$|^_\\w*[a-zA-Z0-9]\\w*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#FormTypeStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.datazone#Forms": { + "type": "string" + }, + "com.amazonaws.datazone#FormsInputMap": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#FormName" + }, + "value": { + "target": "com.amazonaws.datazone#FormEntryInput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.datazone#FormsOutputMap": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#FormName" + }, + "value": { + "target": "com.amazonaws.datazone#FormEntryOutput" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.datazone#GetAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone asset.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetAssetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain to which the asset belongs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the Amazon DataZone asset.

", + "smithy.api#httpQuery": "revision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The ID of the asset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#AssetName", + "traits": { + "smithy.api#documentation": "

The name of the asset.

", + "smithy.api#required": {} + } + }, + "typeIdentifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the asset type.

", + "smithy.api#required": {} + } + }, + "typeRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#required": {} + } + }, + "externalIdentifier": { + "target": "com.amazonaws.datazone#ExternalIdentifier", + "traits": { + "smithy.api#documentation": "

" + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone asset.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the asset.

" + } + }, + "firstRevisionCreatedAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the first revision of the asset was created.

" + } + }, + "firstRevisionCreatedBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the first revision of the asset.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The business glossary terms attached to the asset.

" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns the asset.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain to which the asset belongs.

", + "smithy.api#required": {} + } + }, + "listing": { + "target": "com.amazonaws.datazone#AssetListingDetails", + "traits": { + "smithy.api#documentation": "

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to the asset.

", + "smithy.api#required": {} + } + }, + "readOnlyFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The read-only metadata forms attached to the asset.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetAssetType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetAssetTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetAssetTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone asset type.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/asset-types/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the asset type.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#httpQuery": "revision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetAssetTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type exists.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The name of the asset type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset type.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the asset type.

" + } + }, + "formsOutput": { + "target": "com.amazonaws.datazone#FormsOutputMap", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to the asset type.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project that owns the asset type.

", + "smithy.api#required": {} + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the asset type was originally created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which the asset type was originally created.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset type was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the asset type.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the asset type was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the asset type.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetDataSource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetDataSourceInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetDataSourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone data source.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-sources/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetDataSourceInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone data source.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetDataSourceOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The ID of the data source.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type of the data source.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the data source.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the data source exists.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project where the data source creates and publishes assets.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment where this data source creates and publishes assets.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "traits": { + "smithy.api#documentation": "

The configuration of the data source.

", + "smithy.api#notProperty": {} + } + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", + "traits": { + "smithy.api#documentation": "

" + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

Specifies whether this data source is enabled or not.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the assets that this data source creates in the inventory are to be\n also automatically published to the catalog.

" + } + }, + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The metadata forms attached to the assets created by this data source.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule of the data source runs.

" + } + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the last run of the data source.

" + } + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of the last run of the data source.

" + } + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "lastRunAssetCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of assets created by the data source during its last run.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetDataSourceRun": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetDataSourceRunInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetDataSourceRunOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone data source run.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetDataSourceRunInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain in which this data source run was performed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The ID of the data source run.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetDataSourceRunOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain in which this data source run was performed.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The ID of the data source for this data source run.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The ID of the data source run.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which this data source run occurred.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of this data source run.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceRunType", + "traits": { + "smithy.api#documentation": "

The type of this data source run.

", + "smithy.api#required": {} + } + }, + "dataSourceConfigurationSnapshot": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The configuration snapshot of the data source run.

" + } + }, + "runStatisticsForAssets": { + "target": "com.amazonaws.datazone#RunStatisticsForAssets", + "traits": { + "smithy.api#documentation": "

The asset statistics from this data source run.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when this data source run was updated.

", + "smithy.api#required": {} + } + }, + "startedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when this data source run started.

" + } + }, + "stoppedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when this data source run stopped.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetDomain": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetDomainInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetDomainOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone domain.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{identifier}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#GetDomainInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the specified Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetDomainOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the specified Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone domain.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the Amazon DataZone domain.

" + } + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", + "traits": { + "smithy.api#documentation": "

The single sign-on option of the specified Amazon DataZone domain.

" + } + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The domain execution role with which the Amazon DataZone domain is created.

", + "smithy.api#required": {} + } + }, + "arn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the specified Amazon DataZone domain.

" + } + }, + "kmsKeyIdentifier": { + "target": "com.amazonaws.datazone#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services Key Management Service (KMS) key that is used\n to encrypt the Amazon DataZone domain, metadata, and reporting data.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#DomainStatus", + "traits": { + "smithy.api#documentation": "

The status of the specified Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "portalUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The URL of the data portal for this Amazon DataZone domain.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Amazon DataZone domain was created.

" + } + }, + "lastUpdatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Amazon DataZone domain was last updated.

" + } + }, + "tags": { + "target": "com.amazonaws.datazone#Tags", + "traits": { + "smithy.api#documentation": "

The tags specified for the Amazon DataZone domain.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironment": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone environment.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprint": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprintInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprintOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon DataZone blueprint.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprints/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprintConfigurationInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentBlueprintConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the blueprint configuration in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintConfigurationInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where this blueprint exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintConfigurationOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where this blueprint exists.

", + "smithy.api#required": {} + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the provisioning role with which this blueprint is created.

" + } + }, + "manageAccessRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the manage access role with which this blueprint is created.

" + } + }, + "enabledRegions": { + "target": "com.amazonaws.datazone#EnabledRegionList", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services regions in which this blueprint is enabled.

" + } + }, + "regionalParameters": { + "target": "com.amazonaws.datazone#RegionalParameterMap", + "traits": { + "smithy.api#documentation": "

The regional parameters of the blueprint.

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this blueprint was updated.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which this blueprint exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentBlueprintOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintName", + "traits": { + "smithy.api#documentation": "

The name of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of this Amazon DataZone blueprint.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone blueprint.

", + "smithy.api#required": {} + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties of this Amazon DataZone blueprint.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this blueprint.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms attached to this Amazon DataZone blueprint.

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

A timestamp of when this blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this blueprint was updated.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the environment exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone environment.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentOutput": { + "type": "structure", + "members": { + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment is created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain where the environment exists.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the environment.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "

The name of the environment.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the environment.

" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the environment profile with which the environment is created.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account where the environment exists.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region where the environment exists.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of this Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "provisionedResources": { + "target": "com.amazonaws.datazone#ResourceList", + "traits": { + "smithy.api#documentation": "

The provisioned resources of this Amazon DataZone environment.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#EnvironmentStatus", + "traits": { + "smithy.api#documentation": "

The status of this Amazon DataZone environment.

" + } + }, + "environmentActions": { + "target": "com.amazonaws.datazone#EnvironmentActionList", + "traits": { + "smithy.api#documentation": "

The actions of the environment.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The business glossary terms that can be used in this environment.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of this Amazon DataZone environment.

" + } + }, + "lastDeployment": { + "target": "com.amazonaws.datazone#Deployment", + "traits": { + "smithy.api#documentation": "

The details of the last deployment of the environment.

" + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties of this Amazon DataZone environment.

" + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties of the environment.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The blueprint with which the environment is created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an environment profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the environment profile.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentProfileOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the environment profile.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this environment profile exists.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account where this environment profile exists.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region where this environment profile exists.

" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this environment profile.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when this environment profile was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name of the environment profile.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the environment profile.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which this environment profile is created.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone project in which this environment profile is created.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters of the environment profile.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetFormType": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetFormTypeInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetFormTypeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a metadata form type in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/form-types/{formTypeIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetFormTypeInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "formTypeIdentifier": { + "target": "com.amazonaws.datazone#FormTypeIdentifier", + "traits": { + "smithy.api#documentation": "

The ID of the metadata form type.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of this metadata form type.

", + "smithy.api#httpQuery": "revision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetFormTypeOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this metadata form type exists.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the metadata form type.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the metadata form type.

", + "smithy.api#required": {} + } + }, + "model": { + "target": "com.amazonaws.datazone#Model", + "traits": { + "smithy.api#documentation": "

The model of the metadata form type.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns this metadata form type.

" + } + }, + "originDomainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the metadata form type was originally\n created.

" + } + }, + "originProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project in which this metadata form type was originally created.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#FormTypeStatus", + "traits": { + "smithy.api#documentation": "

The status of the metadata form type.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when this metadata form type was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this metadata form type.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the metadata form type.

" + } + }, + "imports": { + "target": "com.amazonaws.datazone#ImportList", + "traits": { + "smithy.api#documentation": "

The imports of the metadata form type.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetGlossary": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetGlossaryInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetGlossaryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a business glossary in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/glossaries/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetGlossaryInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetGlossaryOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary exists.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project that owns this business glossary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name of the business glossary.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description of the business glossary.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status of the business glossary.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when this business glossary was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created this business glossary.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the business glossary was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the business glossary.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetGlossaryTerm": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetGlossaryTermInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetGlossaryTermOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a business glossary term in Amazon DataZone.
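
A similar hedged sketch for reading a single glossary term, reusing the `dataZone` client configured in the GetGlossary example above (shape and member names are assumed from the model):

// Read one glossary term; the short and long descriptions are optional in the output.
let term = try await dataZone.getGlossaryTerm(
    .init(domainIdentifier: "dzd_example", identifier: "term-id")
)
print(term.name, term.shortDescription ?? "no short description")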

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetGlossaryTermInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary term.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetGlossaryTermOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which this business glossary term exists.

", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary to which this term belongs.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The ID of the business glossary term.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of the business glossary term.

", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of the business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of the business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The relations of the business glossary term.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status of the business glossary term.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the business glossary term was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the business glossary term.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the business glossary term was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the business glossary term.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetGroupProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetGroupProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetGroupProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a group profile in Amazon DataZone.
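
A hedged sketch, again reusing the configured `dataZone` client; the group identifier may be an SSO group ID or name, per the GroupIdentifier pattern defined later in this model:

// Look up the profile of a group within a domain (placeholder identifiers).
let groupProfile = try await dataZone.getGroupProfile(
    .init(domainIdentifier: "dzd_example", groupIdentifier: "my-sso-group")
)
if let name = groupProfile.groupName {
    print("group profile found for \(name)")
}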

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#GetGroupProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "groupIdentifier": { + "target": "com.amazonaws.datazone#GroupIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the group profile.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetGroupProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the group profile exists.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#GroupProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the group profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the group profile.

" + } + }, + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", + "traits": { + "smithy.api#documentation": "

The name of the group for which the specified group profile exists.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetIamPortalLoginUrl": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetIamPortalLoginUrlInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetIamPortalLoginUrlOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the data portal URL for the specified Amazon DataZone domain.
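
A sketch of requesting the portal sign-in URL with the same assumed client setup; note that the operation is modelled as an HTTP POST even though it only reads data:

// Request a data portal login URL for the domain (placeholder domain ID).
let portal = try await dataZone.getIamPortalLoginUrl(
    .init(domainIdentifier: "dzd_example")
)
print(portal.authCodeUrl ?? "no URL returned", portal.userProfileId)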

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/get-portal-login-url" + } + } + }, + "com.amazonaws.datazone#GetIamPortalLoginUrlInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain whose data portal you want to get.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetIamPortalLoginUrlOutput": { + "type": "structure", + "members": { + "authCodeUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data portal URL of the specified Amazon DataZone domain.

" + } + }, + "userProfileId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the user profile.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetListing": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetListingInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetListingOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/listings/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetListingInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#default": "latest", + "smithy.api#documentation": "

", + "smithy.api#httpQuery": "listingRevision" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetListingOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the listing was updated.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the listing.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the listing.

" + } + }, + "item": { + "target": "com.amazonaws.datazone#ListingItem", + "traits": { + "smithy.api#documentation": "

" + } + }, + "name": { + "target": "com.amazonaws.datazone#ListingName", + "traits": { + "smithy.api#documentation": "

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

" + } + }, + "status": { + "target": "com.amazonaws.datazone#ListingStatus", + "traits": { + "smithy.api#documentation": "

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetProjectInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetProjectOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a project in Amazon DataZone.
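
A hedged sketch for fetching a project, reusing the configured client; member names follow the input and output shapes defined below:

// Fetch one project and print its name and (optional) description.
let project = try await dataZone.getProject(
    .init(domainIdentifier: "dzd_example", identifier: "project-id")
)
print(project.name, project.description ?? "no description")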

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/projects/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetProjectInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the project exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetProjectOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the project exists.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the project.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the project.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "lastUpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was last updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The business glossary terms that can be used in the project.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetSubscriptionInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetSubscriptionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a subscription in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscriptions/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionGrant": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetSubscriptionGrantInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetSubscriptionGrantOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the subscription grant in Amazon DataZone.
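
A sketch of reading a subscription grant with the same assumed client; the grant ID is a placeholder:

// Inspect a subscription grant and the target it was created against.
let grant = try await dataZone.getSubscriptionGrant(
    .init(domainIdentifier: "dzd_example", identifier: "grant-id")
)
print(grant.status, grant.subscriptionTargetId)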

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionGrantInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription grant.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionGrantOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription grant.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription grant exists.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant was updated.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The subscription target ID associated with the subscription grant.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", + "traits": { + "smithy.api#documentation": "

The entity to which the subscription is granted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription grant.

", + "smithy.api#required": {} + } + }, + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", + "traits": { + "smithy.api#documentation": "

The assets for which the subscription grant is created.

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription exists.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was updated.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "

The principal that owns the subscription.

", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription request.

" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The retain permissions of the subscription.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionRequestDetails": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetSubscriptionRequestDetailsInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetSubscriptionRequestDetailsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the details of the specified subscription request.
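
A hedged sketch for fetching subscription request details, reusing the client from earlier; the reviewer fields are only populated once the request has been reviewed:

// Read a subscription request and, if present, the reviewer's decision comment.
let request = try await dataZone.getSubscriptionRequestDetails(
    .init(domainIdentifier: "dzd_example", identifier: "request-id")
)
print(request.status, request.requestReason)
if let comment = request.decisionComment {
    print("decision comment:", comment)
}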

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionRequestDetailsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to get the subscription request\n details.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request whose details you want to get.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionRequestDetailsOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone domain of the subscription request.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the specified subscription request was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The subscribed principals in the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

The subscribed listings in the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone user who reviewed the subscription request.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the subscription request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetSubscriptionTargetInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetSubscriptionTargetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the subscription target in Amazon DataZone.
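
Subscription targets are scoped to an environment, so this sketch (same assumed client, placeholder IDs) passes all three identifiers:

// Fetch a subscription target inside a specific environment of a domain.
let target = try await dataZone.getSubscriptionTarget(
    .init(
        domainIdentifier: "dzd_example",
        environmentIdentifier: "environment-id",
        identifier: "target-id"
    )
)
print(target.name, target.applicableAssetTypes)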

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target exists.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment associated with the subscription target.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetSubscriptionTargetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The ID of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals of the subscription target.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the subscription target exists.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project associated with the subscription target.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment associated with the subscription target.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role with which the subscription target was created.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types associated with the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#GetUserProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetUserProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetUserProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets a user profile in Amazon DataZone.
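
A sketch for looking up a user profile with the same assumed client; the optional type query parameter is omitted here because the UserProfileType case names are not shown in this hunk:

// Fetch the profile of a user in the domain (IAM or SSO, depending on the user).
let profile = try await dataZone.getUserProfile(
    .init(domainIdentifier: "dzd_example", userIdentifier: "user-id")
)
print(profile.id ?? "unknown profile id", String(describing: profile.status))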

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#GetUserProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which you want to get the user profile.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "com.amazonaws.datazone#UserIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the user for whom you want to get the user profile.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

", + "smithy.api#httpQuery": "type" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetUserProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which you want to get the user\n profile.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the user profile.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile.

" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#Glossary": { + "type": "resource", + "identifiers": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#GlossaryId" + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName" + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription" + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus" + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt" + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateGlossary" + }, + "read": { + "target": "com.amazonaws.datazone#GetGlossary" + }, + "update": { + "target": "com.amazonaws.datazone#UpdateGlossary" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteGlossary" + } + }, + "com.amazonaws.datazone#GlossaryDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#GlossaryId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#GlossaryItem": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the business glossary exists.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The identifier of the glossary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name of the glossary.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that owns the business glossary.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The business glossary description.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The business glossary status.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the glossary was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the glossary.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the business glossary was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the business glossary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a business glossary.

" + } + }, + "com.amazonaws.datazone#GlossaryName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#GlossaryStatus": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + } + }, + "com.amazonaws.datazone#GlossaryTerm": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + } + }, + "properties": { + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "glossaryIdentifier": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId" + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName" + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription" + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription" + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus" + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt" + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy" + } + }, + "create": { + "target": "com.amazonaws.datazone#CreateGlossaryTerm" + }, + "read": { + "target": "com.amazonaws.datazone#GetGlossaryTerm" + }, + "update": { + "target": "com.amazonaws.datazone#UpdateGlossaryTerm" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteGlossaryTerm" + } + }, + "com.amazonaws.datazone#GlossaryTermId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#GlossaryTermItem": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the business glossary exists.

", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary to which the term belongs.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary term.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name of the business glossary term.

", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description of the business glossary term.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description of the business glossary term.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The relations of the business glossary term.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status of the business glossary term.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when a business glossary term was created.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the business glossary term.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when a business glossary term was updated.

" + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the business glossary term.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a business glossary term.

" + } + }, + "com.amazonaws.datazone#GlossaryTermName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#GlossaryTermStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.datazone#GlossaryTerms": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#GlossaryTermId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.datazone#GlueRunConfigurationInput": { + "type": "structure", + "members": { + "dataAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data access role included in the configuration details of the Amazon Web Services Glue data\n source.

", + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]{1,128}$" + } + }, + "relationalFilterConfigurations": { + "target": "com.amazonaws.datazone#RelationalFilterConfigurations", + "traits": { + "smithy.api#documentation": "

The relational filter configurations included in the configuration details of the Amazon Web Services\n Glue data source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of the Amazon Web Services Glue data source.

" + } + }, + "com.amazonaws.datazone#GlueRunConfigurationOutput": { + "type": "structure", + "members": { + "accountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID included in the configuration details of the Amazon Web Services Glue data\n source.

", + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^\\d{12}$" + } + }, + "region": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region included in the configuration details of the Amazon Web Services Glue data source.\n

", + "smithy.api#length": { + "min": 4, + "max": 16 + }, + "smithy.api#pattern": "[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]" + } + }, + "dataAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data access role included in the configuration details of the Amazon Web Services Glue data\n source.

", + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]{1,128}$" + } + }, + "relationalFilterConfigurations": { + "target": "com.amazonaws.datazone#RelationalFilterConfigurations", + "traits": { + "smithy.api#documentation": "

The relational filter configurations included in the configuration details of the Amazon Web Services\n Glue data source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of the Amazon Web Services Glue data source.

" + } + }, + "com.amazonaws.datazone#GrantedEntity": { + "type": "union", + "members": { + "listing": { + "target": "com.amazonaws.datazone#ListingRevision", + "traits": { + "smithy.api#documentation": "

The listing for which a subscription is granted.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a listing for which a subscription is granted.

" + } + }, + "com.amazonaws.datazone#GrantedEntityInput": { + "type": "union", + "members": { + "listing": { + "target": "com.amazonaws.datazone#ListingRevisionInput", + "traits": { + "smithy.api#documentation": "

The listing for which a subscription is to be granted.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a listing for which a subscription is to be granted.

" + } + }, + "com.amazonaws.datazone#GroupDetails": { + "type": "structure", + "members": { + "groupId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the group in Amazon DataZone.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a group in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#GroupIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]+)" + } + }, + "com.amazonaws.datazone#GroupProfileId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + } + }, + "com.amazonaws.datazone#GroupProfileName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[a-zA-Z_0-9+=,.@-]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#GroupProfileStatus": { + "type": "enum", + "members": { + "ASSIGNED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSIGNED" + } + }, + "NOT_ASSIGNED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_ASSIGNED" + } + } + } + }, + "com.amazonaws.datazone#GroupProfileSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#GroupProfileSummary" + } + }, + "com.amazonaws.datazone#GroupProfileSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain of a group profile.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#GroupProfileId", + "traits": { + "smithy.api#documentation": "

The ID of a group profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of a group profile.

" + } + }, + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", + "traits": { + "smithy.api#documentation": "

The group name of a group profile.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a group profile.

" + } + }, + "com.amazonaws.datazone#GroupSearchText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#GroupSearchType": { + "type": "enum", + "members": { + "SSO_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSO_GROUP" + } + }, + "DATAZONE_SSO_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATAZONE_SSO_GROUP" + } + } + } + }, + "com.amazonaws.datazone#IamUserProfileDetails": { + "type": "structure", + "members": { + "arn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of an IAM user profile in Amazon DataZone.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an IAM user profile in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#Import": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.datazone#FormTypeName", + "traits": { + "smithy.api#documentation": "

The name of the import.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the import.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the import of the metadata form type.

" + } + }, + "com.amazonaws.datazone#ImportList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#Import" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.datazone#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request has failed because of an unknown error, exception or failure.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.datazone#InventorySearchScope": { + "type": "enum", + "members": { + "ASSET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSET" + } + }, + "GLOSSARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY" + } + }, + "GLOSSARY_TERM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOSSARY_TERM" + } + } + } + }, + "com.amazonaws.datazone#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$" + } + }, + "com.amazonaws.datazone#LastName": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#ListAssetRevisions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListAssetRevisionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListAssetRevisionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the revisions for the asset.
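
Because this operation is paginated, the sketch below walks every page by hand via nextToken (Soto also generates paginator helpers, not shown here); the element type name DataZone.AssetRevision is an assumption based on the AssetRevisions list target:

// Collect all revisions of an asset by following nextToken until it is nil.
var revisions: [DataZone.AssetRevision] = []
var nextToken: String? = nil
repeat {
    let page = try await dataZone.listAssetRevisions(
        .init(
            domainIdentifier: "dzd_example",
            identifier: "asset-id",
            maxResults: 50,
            nextToken: nextToken
        )
    )
    revisions.append(contentsOf: page.items ?? [])
    nextToken = page.nextToken
} while nextToken != nil
print("found \(revisions.count) revisions")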

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}/revisions" + }, + "smithy.api#paginated": {}, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListAssetRevisionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the asset.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of revisions, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetRevisions to\n list the next set of revisions.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of revisions to return in a single call to\n ListAssetRevisions. When the number of revisions to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListAssetRevisions to list the\n next set of revisions.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListAssetRevisionsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#AssetRevisions", + "traits": { + "smithy.api#documentation": "

The results of the ListAssetRevisions action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of revisions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of revisions, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListAssetRevisions to\n list the next set of revisions.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRunActivities": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListDataSourceRunActivitiesInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListDataSourceRunActivitiesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists data source run activities.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-source-runs/{identifier}/activities" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRunActivitiesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to list data source run\n activities.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source run.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataAssetActivityStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source run.

", + "smithy.api#httpQuery": "status" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of activities is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of activities, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListDataSourceRunActivities to list the next set of activities.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of activities to return in a single call to\n ListDataSourceRunActivities. When the number of activities to be listed is\n greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListDataSourceRunActivities to list the next set of activities.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRunActivitiesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#DataSourceRunActivities", + "traits": { + "smithy.api#documentation": "

The results of the ListDataSourceRunActivities action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of activities is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of activities, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListDataSourceRunActivities to list the next set of activities.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRuns": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListDataSourceRunsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListDataSourceRunsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists data source runs in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRunsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to invoke the\n ListDataSourceRuns action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceIdentifier": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source run.

", + "smithy.api#httpQuery": "status" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of runs is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of runs, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDataSourceRuns to list the next set of\n runs.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of runs to return in a single call to\n ListDataSourceRuns. When the number of runs to be listed is greater than the\n value of MaxResults, the response contains a NextToken value that\n you can use in a subsequent call to ListDataSourceRuns to list the next set of\n runs.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListDataSourceRunsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#DataSourceRunSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListDataSourceRuns action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of runs is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of runs, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDataSourceRuns to list the next set of\n runs.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListDataSources": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListDataSourcesInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListDataSourcesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists data sources in Amazon DataZone.
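
Because this operation carries the smithy.api#paginated trait with explicit input/output tokens, Soto normally also emits an AsyncSequence-style paginator next to the plain call. The sketch below assumes that convention; the listDataSourcesPaginator name and the summary field accessed are inferred from this model and Soto's naming pattern, not confirmed signatures.

```swift
import SotoDataZone

// Sketch of the assumed generated paginator; each element of the sequence
// is one ListDataSourcesOutput page.
func printDataSources(dataZone: DataZone, domainId: String, projectId: String) async throws {
    let input = DataZone.ListDataSourcesInput(
        domainIdentifier: domainId,
        maxResults: 25,
        projectIdentifier: projectId
    )
    for try await page in dataZone.listDataSourcesPaginator(input) {
        for summary in page.items {
            print(summary.name)   // `name` is assumed to be a member of DataSourceSummary
        }
    }
}
```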

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/data-sources" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDataSourcesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to list the data sources.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which to list data sources.

", + "smithy.api#httpQuery": "projectIdentifier", + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the environment in which to list the data sources.

", + "smithy.api#httpQuery": "environmentIdentifier" + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type of the data source.

", + "smithy.api#httpQuery": "type" + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

", + "smithy.api#httpQuery": "status" + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name of the data source.

", + "smithy.api#httpQuery": "name" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of data sources is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of data sources, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListDataSources to\n list the next set of data sources.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of data sources to return in a single call to\n ListDataSources. When the number of data sources to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListDataSources to list the\n next set of data sources.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListDataSourcesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#DataSourceSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListDataSources action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of data sources is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of data sources, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListDataSources to\n list the next set of data sources.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListDomains": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListDomainsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListDomainsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists Amazon DataZone domains.
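
A filtered call through the assumed generated Swift API could look like this sketch; the .available case is inferred from the DomainStatus enum elsewhere in this model.

```swift
import SotoDataZone

// Sketch: list only the domains that are currently AVAILABLE.
func printAvailableDomains(dataZone: DataZone) async throws {
    let output = try await dataZone.listDomains(.init(maxResults: 25, status: .available))
    for domain in output.items {
        print(domain.id)
    }
}
```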

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListDomainsInput": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.datazone#DomainStatus", + "traits": { + "smithy.api#documentation": "

The status of the domains that you want to list.

", + "smithy.api#httpQuery": "status" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResultsForListDomains", + "traits": { + "smithy.api#documentation": "

The maximum number of domains to return in a single call to ListDomains.\n When the number of domains to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to ListDomains to list the next set of\n domains.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListDomainsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#DomainSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListDomains action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurationsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists blueprint configurations for an Amazon DataZone environment.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurationsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of blueprint configurations to return in a single call to\n ListEnvironmentBlueprintConfigurations. When the number of configurations\n to be listed is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListEnvironmentBlueprintConfigurations to list the next set of\n configurations.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of blueprint configurations is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of configurations, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentBlueprintConfigurations to list the next set of\n configurations.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprintConfigurationsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintConfigurations", + "traits": { + "smithy.api#documentation": "

The results of the ListEnvironmentBlueprintConfigurations action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of blueprint configurations is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of configurations, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentBlueprintConfigurations to list the next set of\n configurations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprints": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprintsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListEnvironmentBlueprintsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists blueprints in an Amazon DataZone environment.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprints" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprintsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of blueprints to return in a single call to\n ListEnvironmentBlueprints. When the number of blueprints to be listed is\n greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListEnvironmentBlueprints to list the next set of blueprints.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of blueprints in the environment is greater than the default value for\n the MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of blueprints in the environment,\n the response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentBlueprints to list the next set of blueprints.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintName", + "traits": { + "smithy.api#documentation": "

The name of the Amazon DataZone environment blueprint.

", + "smithy.api#httpQuery": "name" + } + }, + "managed": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether the environment blueprint is managed by Amazon DataZone.

", + "smithy.api#httpQuery": "managed" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentBlueprintsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListEnvironmentBlueprints action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of blueprints in the environment is greater than the default value for\n the MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of blueprints in the environment,\n the response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentBlueprints to list the next set of blueprints.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentProfiles": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListEnvironmentProfilesInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListEnvironmentProfilesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists Amazon DataZone environment profiles.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environment-profiles" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentProfilesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services account where you want to list environment\n profiles.

", + "smithy.api#httpQuery": "awsAccountId" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region where you want to list environment profiles.

", + "smithy.api#httpQuery": "awsAccountRegion" + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the blueprint that was used to create the environment profiles that\n you want to list.

", + "smithy.api#httpQuery": "environmentBlueprintIdentifier" + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project.

", + "smithy.api#httpQuery": "projectIdentifier" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name of the environment profile.

", + "smithy.api#httpQuery": "name" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of environment profiles is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environment profiles, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentProfiles to list the next set of environment\n profiles.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of environment profiles to return in a single call to\n ListEnvironmentProfiles. When the number of environment profiles to be\n listed is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListEnvironmentProfiles to list the next set of environment\n profiles.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentProfilesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#EnvironmentProfileSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListEnvironmentProfiles action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of environment profiles is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environment profiles, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListEnvironmentProfiles to list the next set of environment\n profiles.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListEnvironments": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListEnvironmentsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListEnvironmentsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists Amazon DataZone environments.
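
A sketch of calling this operation through the assumed generated Swift API, combining the required domain and project identifiers with the optional Region filter:

```swift
import SotoDataZone

// Sketch: list one project's environments deployed to us-east-1.
func printProjectEnvironments(dataZone: DataZone, domainId: String, projectId: String) async throws {
    let output = try await dataZone.listEnvironments(.init(
        awsAccountRegion: "us-east-1",
        domainIdentifier: domainId,
        maxResults: 25,
        projectIdentifier: projectId
    ))
    for environment in output.items {
        print(environment)
    }
}
```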

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services account where you want to list\n environments.

", + "smithy.api#httpQuery": "awsAccountId" + } + }, + "status": { + "target": "com.amazonaws.datazone#EnvironmentStatus", + "traits": { + "smithy.api#documentation": "

The status of the environments that you want to list.

", + "smithy.api#httpQuery": "status" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region where you want to list environments.

", + "smithy.api#httpQuery": "awsAccountRegion" + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone project.

", + "smithy.api#httpQuery": "projectIdentifier", + "smithy.api#required": {} + } + }, + "environmentProfileIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile.

", + "smithy.api#httpQuery": "environmentProfileIdentifier" + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone blueprint.

", + "smithy.api#httpQuery": "environmentBlueprintIdentifier" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the environment.

", + "smithy.api#httpQuery": "provider" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment.

", + "smithy.api#httpQuery": "name" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of environments to return in a single call to\n ListEnvironments. When the number of environments to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListEnvironments to list the\n next set of environments.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of environments is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environments, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListEnvironments to\n list the next set of environments.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#EnvironmentSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListEnvironments action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of environments is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environments, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListEnvironments to\n list the next set of environments.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListNotifications": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListNotificationsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListNotificationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all Amazon DataZone notifications.
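
A sketch of a time-window query through the assumed generated Swift API; Timestamp members map to Foundation.Date in Soto shapes, and the .task case is inferred from the NotificationType enum in this model.

```swift
import Foundation
import SotoDataZone

// Sketch: fetch TASK notifications created during the last 24 hours.
func printRecentTaskNotifications(dataZone: DataZone, domainId: String) async throws {
    let output = try await dataZone.listNotifications(.init(
        afterTimestamp: Date().addingTimeInterval(-86_400),
        domainIdentifier: domainId,
        maxResults: 50,
        type: .task
    ))
    for notification in output.notifications ?? [] {
        print(notification)
    }
}
```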

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/notifications" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "notifications" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListNotificationsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#NotificationType", + "traits": { + "smithy.api#documentation": "

The type of notifications.

", + "smithy.api#httpQuery": "type", + "smithy.api#required": {} + } + }, + "afterTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time after which you want to list notifications.

", + "smithy.api#httpQuery": "afterTimestamp" + } + }, + "beforeTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time before which you want to list notifications.

", + "smithy.api#httpQuery": "beforeTimestamp" + } + }, + "subjects": { + "target": "com.amazonaws.datazone#NotificationSubjects", + "traits": { + "smithy.api#documentation": "

The subjects of notifications.

", + "smithy.api#httpQuery": "subjects" + } + }, + "taskStatus": { + "target": "com.amazonaws.datazone#TaskStatus", + "traits": { + "smithy.api#documentation": "

The task status of notifications.

", + "smithy.api#httpQuery": "taskStatus" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of notifications to return in a single call to\n ListNotifications. When the number of notifications to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListNotifications to list the\n next set of notifications.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of notifications is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of notifications, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListNotifications to\n list the next set of notifications.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListNotificationsOutput": { + "type": "structure", + "members": { + "notifications": { + "target": "com.amazonaws.datazone#NotificationsList", + "traits": { + "smithy.api#documentation": "

The results of the ListNotifications action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of notifications is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of notifications, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListNotifications to\n list the next set of notifications.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListProjectMemberships": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListProjectMembershipsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListProjectMembershipsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all members of the specified project.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/projects/{projectIdentifier}/memberships" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "members" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListProjectMembershipsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which you want to list project\n memberships.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "projectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project whose memberships you want to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sortBy": { + "target": "com.amazonaws.datazone#SortFieldProject", + "traits": { + "smithy.api#documentation": "

The method by which you want to sort the project memberships.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

The sort order of the project memberships.

", + "smithy.api#httpQuery": "sortOrder" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of memberships is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of memberships, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListProjectMemberships\n to list the next set of memberships.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of memberships to return in a single call to\n ListProjectMemberships. When the number of memberships to be listed is\n greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListProjectMemberships to list the next set of memberships.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListProjectMembershipsOutput": { + "type": "structure", + "members": { + "members": { + "target": "com.amazonaws.datazone#ProjectMembers", + "traits": { + "smithy.api#documentation": "

The members of the project.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of memberships is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of memberships, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListProjectMemberships\n to list the next set of memberships.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListProjects": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListProjectsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListProjectsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists Amazon DataZone projects.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/projects" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListProjectsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone user.

", + "smithy.api#httpQuery": "userIdentifier" + } + }, + "groupIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of a group.

", + "smithy.api#httpQuery": "groupIdentifier" + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the project.

", + "smithy.api#httpQuery": "name" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of projects is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of projects, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListProjects to list\n the next set of projects.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of projects to return in a single call to ListProjects.\n When the number of projects to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to ListProjects to list the next set of\n projects.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListProjectsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#ProjectSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListProjects action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of projects is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of projects, the response includes\n a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListProjects to list\n the next set of projects.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionGrants": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListSubscriptionGrantsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListSubscriptionGrantsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists subscription grants.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscription-grants" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionGrantsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone environment.

", + "smithy.api#httpQuery": "environmentId" + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription target.

", + "smithy.api#httpQuery": "subscriptionTargetId" + } + }, + "subscribedListingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscribed listing.

", + "smithy.api#httpQuery": "subscribedListingId" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

", + "smithy.api#httpQuery": "subscriptionId" + } + }, + "sortBy": { + "target": "com.amazonaws.datazone#SortKey", + "traits": { + "smithy.api#documentation": "

Specifies the way of sorting the results of this action.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

Specifies the sort order of this action.

", + "smithy.api#httpQuery": "sortOrder" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of subscription grants to return in a single call to\n ListSubscriptionGrants. When the number of subscription grants to be listed\n is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListSubscriptionGrants to list the next set of subscription grants.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription grants is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription grants, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListSubscriptionGrants\n to list the next set of subscription grants.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionGrantsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SubscriptionGrants", + "traits": { + "smithy.api#documentation": "

The results of the ListSubscriptionGrants action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription grants is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription grants, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListSubscriptionGrants\n to list the next set of subscription grants.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionRequests": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListSubscriptionRequestsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListSubscriptionRequestsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists Amazon DataZone subscription requests.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionRequestsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

Specifies the status of the subscription requests.

", + "smithy.api#httpQuery": "status" + } + }, + "subscribedListingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscribed listing.

", + "smithy.api#httpQuery": "subscribedListingId" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project for the subscription requests.

", + "smithy.api#httpQuery": "owningProjectId" + } + }, + "approverProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request approver's project.

", + "smithy.api#httpQuery": "approverProjectId" + } + }, + "sortBy": { + "target": "com.amazonaws.datazone#SortKey", + "traits": { + "smithy.api#documentation": "

Specifies the way to sort the results of this action.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

Specifies the sort order for the results of this action.

", + "smithy.api#httpQuery": "sortOrder" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of subscription requests to return in a single call to\n ListSubscriptionRequests. When the number of subscription requests to be\n listed is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListSubscriptionRequests to list the next set of subscription\n requests.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription requests is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription requests, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListSubscriptionRequests to list the next set of subscription\n requests.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionRequestsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SubscriptionRequests", + "traits": { + "smithy.api#documentation": "

The results of the ListSubscriptionRequests action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription requests is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription requests, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListSubscriptionRequests to list the next set of subscription\n requests.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionTargets": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListSubscriptionTargetsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListSubscriptionTargetsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists subscription targets in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionTargetsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where you want to list subscription\n targets.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment where you want to list subscription targets.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sortBy": { + "target": "com.amazonaws.datazone#SortKey", + "traits": { + "smithy.api#documentation": "

Specifies the way in which the results of this action are to be sorted.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

Specifies the sort order for the results of this action.

", + "smithy.api#httpQuery": "sortOrder" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of subscription targets to return in a single call to\n ListSubscriptionTargets. When the number of subscription targets to be\n listed is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListSubscriptionTargets to list the next set of subscription targets.\n

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription targets is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription targets, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListSubscriptionTargets to list the next set of subscription\n targets.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionTargetsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SubscriptionTargets", + "traits": { + "smithy.api#documentation": "

The results of the ListSubscriptionTargets action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscription targets is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscription targets, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to\n ListSubscriptionTargets to list the next set of subscription\n targets.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListSubscriptions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListSubscriptionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListSubscriptionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists subscriptions in Amazon DataZone.
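A minimal Swift sketch, assuming the generated client exposes listSubscriptions and that SubscriptionStatus has an .approved case (the IDs below are placeholders):

import SotoDataZone

// List approved subscriptions owned by a single project.
func listApprovedSubscriptions(dataZone: DataZone, domainId: String, projectId: String) async throws {
    let output = try await dataZone.listSubscriptions(.init(
        domainIdentifier: domainId,
        maxResults: 50,
        owningProjectId: projectId,
        status: .approved
    ))
    print("Found \(output.items.count) approved subscriptions")
}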

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/subscriptions" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "subscriptionRequestIdentifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request for the subscriptions that you want to\n list.

", + "smithy.api#httpQuery": "subscriptionRequestIdentifier" + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscriptions that you want to list.

", + "smithy.api#httpQuery": "status" + } + }, + "subscribedListingId": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscribed listing for the subscriptions that you want to\n list.

", + "smithy.api#httpQuery": "subscribedListingId" + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the owning project.

", + "smithy.api#httpQuery": "owningProjectId" + } + }, + "approverProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project for the subscription's approver.

", + "smithy.api#httpQuery": "approverProjectId" + } + }, + "sortBy": { + "target": "com.amazonaws.datazone#SortKey", + "traits": { + "smithy.api#documentation": "

Specifies the way in which the results of this action are to be sorted.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "sortOrder": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

Specifies the sort order for the results of this action.

", + "smithy.api#httpQuery": "sortOrder" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of subscriptions to return in a single call to\n ListSubscriptions. When the number of subscriptions to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to ListSubscriptions to list the\n next set of Subscriptions.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscriptions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscriptions, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListSubscriptions to\n list the next set of subscriptions.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListSubscriptionsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#Subscriptions", + "traits": { + "smithy.api#documentation": "

The results of the ListSubscriptions action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of subscriptions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of subscriptions, the response\n includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListSubscriptions to\n list the next set of subscriptions.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.datazone#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists tags for the specified resource in Amazon DataZone.
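A minimal Swift sketch of the generated call (the resource ARN is a placeholder):

import SotoDataZone

// Fetch and print the tags attached to a DataZone resource.
func showTags(dataZone: DataZone, resourceArn: String) async throws {
    let response = try await dataZone.listTagsForResource(.init(resourceArn: resourceArn))
    for (key, value) in response.tags ?? [:] {
        print("\(key) = \(value)")
    }
}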

", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource whose tags you want to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.datazone#Tags", + "traits": { + "smithy.api#documentation": "

The tags of the specified resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#Listing": { + "type": "resource", + "identifiers": { + "identifier": { + "target": "com.amazonaws.datazone#ListingId" + } + }, + "properties": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId" + }, + "id": { + "target": "com.amazonaws.datazone#ListingId" + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId" + }, + "listingRevision": { + "target": "com.amazonaws.datazone#Revision" + }, + "name": { + "target": "com.amazonaws.datazone#ListingName" + }, + "description": { + "target": "com.amazonaws.datazone#Description" + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt" + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt" + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy" + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy" + }, + "item": { + "target": "com.amazonaws.datazone#ListingItem" + }, + "status": { + "target": "com.amazonaws.datazone#ListingStatus" + } + }, + "read": { + "target": "com.amazonaws.datazone#GetListing" + }, + "delete": { + "target": "com.amazonaws.datazone#DeleteListing" + } + }, + "com.amazonaws.datazone#ListingId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#ListingItem": { + "type": "union", + "members": { + "assetListing": { + "target": "com.amazonaws.datazone#AssetListing", + "traits": { + "smithy.api#documentation": "

An asset published in an Amazon DataZone catalog.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a listing (that is, an asset published in an Amazon DataZone catalog).

" + } + }, + "com.amazonaws.datazone#ListingName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.datazone#ListingRevision": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

An identifier of a revision of an asset published in an Amazon DataZone catalog.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The details of a revision of an asset published in an Amazon DataZone catalog.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A revision of an asset published in an Amazon DataZone catalog.

" + } + }, + "com.amazonaws.datazone#ListingRevisionInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

An identifier of the revision to be made to an asset published in an Amazon DataZone\n catalog.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The details of a revision to be made to an asset published in an Amazon DataZone\n catalog.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A revision to be made to an asset published in an Amazon DataZone catalog.

" + } + }, + "com.amazonaws.datazone#ListingStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACTIVE" + } + } + } + }, + "com.amazonaws.datazone#LongDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.datazone#MaxResultsForListDomains": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 25 + } + } + }, + "com.amazonaws.datazone#Member": { + "type": "union", + "members": { + "userIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user ID of a project member.

" + } + }, + "groupIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the group of a project member.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details about a project member.

" + } + }, + "com.amazonaws.datazone#MemberDetails": { + "type": "union", + "members": { + "user": { + "target": "com.amazonaws.datazone#UserDetails", + "traits": { + "smithy.api#documentation": "

The user details of a project member.

" + } + }, + "group": { + "target": "com.amazonaws.datazone#GroupDetails", + "traits": { + "smithy.api#documentation": "

The group details of a project member.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details about a project member.

" + } + }, + "com.amazonaws.datazone#Message": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 16384 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#MetadataMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.datazone#Model": { + "type": "union", + "members": { + "smithy": { + "target": "com.amazonaws.datazone#Smithy", + "traits": { + "smithy.api#documentation": "

" + } + } + }, + "traits": { + "smithy.api#documentation": "

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#Name": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#NotificationOutput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#TaskId", + "traits": { + "smithy.api#documentation": "

The identifier of the notification.

", + "smithy.api#required": {} + } + }, + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of an Amazon DataZone domain in which the notification exists.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#NotificationType", + "traits": { + "smithy.api#documentation": "

The type of the notification.

", + "smithy.api#required": {} + } + }, + "topic": { + "target": "com.amazonaws.datazone#Topic", + "traits": { + "smithy.api#documentation": "

The topic of the notification.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.datazone#Title", + "traits": { + "smithy.api#documentation": "

The title of the notification.

", + "smithy.api#required": {} + } + }, + "message": { + "target": "com.amazonaws.datazone#Message", + "traits": { + "smithy.api#documentation": "

The message included in the notification.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#TaskStatus", + "traits": { + "smithy.api#documentation": "

The status included in the notification.

" + } + }, + "actionLink": { + "target": "com.amazonaws.datazone#ActionLink", + "traits": { + "smithy.api#documentation": "

The action link included in the notification.

", + "smithy.api#required": {} + } + }, + "creationTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when a notification was created.

", + "smithy.api#required": {} + } + }, + "lastUpdatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the notification was last updated.

", + "smithy.api#required": {} + } + }, + "metadata": { + "target": "com.amazonaws.datazone#MetadataMap", + "traits": { + "smithy.api#documentation": "

The metadata included in the notification.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a notification generated in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#NotificationResource": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.datazone#NotificationResourceType", + "traits": { + "smithy.api#documentation": "

The type of the resource mentioned in a notification.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the resource mentioned in a notification.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the resource mentioned in a notification.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the resource mentioned in a notification.

" + } + }, + "com.amazonaws.datazone#NotificationResourceType": { + "type": "enum", + "members": { + "PROJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT" + } + } + } + }, + "com.amazonaws.datazone#NotificationRole": { + "type": "enum", + "members": { + "PROJECT_OWNER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_OWNER" + } + }, + "PROJECT_CONTRIBUTOR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_CONTRIBUTOR" + } + }, + "PROJECT_VIEWER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_VIEWER" + } + }, + "DOMAIN_OWNER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOMAIN_OWNER" + } + }, + "PROJECT_SUBSCRIBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_SUBSCRIBER" + } + } + } + }, + "com.amazonaws.datazone#NotificationSubjects": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.datazone#NotificationType": { + "type": "enum", + "members": { + "TASK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TASK" + } + }, + "EVENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EVENT" + } + } + } + }, + "com.amazonaws.datazone#NotificationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#NotificationOutput" + } + }, + "com.amazonaws.datazone#PaginationToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 8192 + } + } + }, + "com.amazonaws.datazone#PredictionChoices": { + "type": "list", + "member": { + "target": "smithy.api#Integer" + } + }, + "com.amazonaws.datazone#PredictionConfiguration": { + "type": "structure", + "members": { + "businessNameGeneration": { + "target": "com.amazonaws.datazone#BusinessNameGenerationConfiguration", + "traits": { + "smithy.api#documentation": "

The business name generation mechanism.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the prediction.

" + } + }, + "com.amazonaws.datazone#ProjectId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#ProjectMember": { + "type": "structure", + "members": { + "memberDetails": { + "target": "com.amazonaws.datazone#MemberDetails", + "traits": { + "smithy.api#documentation": "

The membership details of a project member.

", + "smithy.api#required": {} + } + }, + "designation": { + "target": "com.amazonaws.datazone#UserDesignation", + "traits": { + "smithy.api#documentation": "

The designated role of a project member.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a project member.

" + } + }, + "com.amazonaws.datazone#ProjectMembers": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ProjectMember" + } + }, + "com.amazonaws.datazone#ProjectName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w -]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#ProjectSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#ProjectSummary" + } + }, + "com.amazonaws.datazone#ProjectSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of an Amazon DataZone domain where the project exists.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of a project.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of a project.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of a project.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when a project was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was updated.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of an Amazon DataZone project.

" + } + }, + "com.amazonaws.datazone#ProvisioningProperties": { + "type": "union", + "members": { + "cloudFormation": { + "target": "com.amazonaws.datazone#CloudFormationProperties", + "traits": { + "smithy.api#documentation": "

The CloudFormation properties included as part of the provisioning properties of an\n environment blueprint.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The provisioning properties of an environment blueprint.

" + } + }, + "com.amazonaws.datazone#PutEnvironmentBlueprintConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#PutEnvironmentBlueprintConfigurationInput" + }, + "output": { + "target": "com.amazonaws.datazone#PutEnvironmentBlueprintConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Writes the configuration for the specified environment blueprint in Amazon DataZone.
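A hedged Swift sketch of enabling a blueprint, assuming the usual Soto naming; the role ARNs, Regions, and the regional parameter key/value are placeholders, not values required by the service:

import SotoDataZone

// Enable an environment blueprint in two Regions and point it at the IAM roles
// it should use for provisioning and access management.
func enableBlueprint(dataZone: DataZone, domainId: String, blueprintId: String) async throws {
    let output = try await dataZone.putEnvironmentBlueprintConfiguration(.init(
        domainIdentifier: domainId,
        enabledRegions: ["us-east-1", "us-west-2"],
        environmentBlueprintIdentifier: blueprintId,
        manageAccessRoleArn: "arn:aws:iam::111122223333:role/DataZoneManageAccess",
        provisioningRoleArn: "arn:aws:iam::111122223333:role/DataZoneProvisioning",
        regionalParameters: ["us-east-1": ["S3Location": "s3://example-datazone-bucket"]] // placeholder key/value
    ))
    print("Configured blueprint \(output.environmentBlueprintId)")
}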

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#PutEnvironmentBlueprintConfigurationInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentBlueprintIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment blueprint.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "provisioningRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the provisioning role.

" + } + }, + "manageAccessRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the manage access role.

" + } + }, + "enabledRegions": { + "target": "com.amazonaws.datazone#EnabledRegionList", + "traits": { + "smithy.api#documentation": "

Specifies the enabled Amazon Web Services Regions.

", + "smithy.api#required": {} + } + }, + "regionalParameters": { + "target": "com.amazonaws.datazone#RegionalParameterMap", + "traits": { + "smithy.api#documentation": "

The regional parameters in the environment blueprint.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#PutEnvironmentBlueprintConfigurationOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment blueprint.

", + "smithy.api#required": {} + } + }, + "provisioningRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the provisioning role.

" + } + }, + "manageAccessRoleArn": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The ARN of the manage access role.

" + } + }, + "enabledRegions": { + "target": "com.amazonaws.datazone#EnabledRegionList", + "traits": { + "smithy.api#documentation": "

Specifies the enabled Amazon Web Services Regions.

" + } + }, + "regionalParameters": { + "target": "com.amazonaws.datazone#RegionalParameterMap", + "traits": { + "smithy.api#documentation": "

The regional parameters in the environment blueprint.

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment blueprint was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment blueprint was updated.

", + "smithy.api#timestampFormat": "date-time" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RecommendationConfiguration": { + "type": "structure", + "members": { + "enableBusinessNameGeneration": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether automatic business name generation is enabled as part of\n the recommendation configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The recommendation to be updated as part of the UpdateDataSource\n action.

" + } + }, + "com.amazonaws.datazone#RedshiftClusterStorage": { + "type": "structure", + "members": { + "clusterName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of an Amazon Redshift cluster.

", + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[0-9a-z].[a-z0-9\\-]*$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the Amazon Redshift cluster storage.

" + } + }, + "com.amazonaws.datazone#RedshiftCredentialConfiguration": { + "type": "structure", + "members": { + "secretManagerArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of a Secrets Manager secret for an Amazon Redshift cluster.

", + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^arn:aws[^:]*:secretsmanager:[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]:\\d{12}:secret:.*$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the credentials required to access an Amazon Redshift cluster.

" + } + }, + "com.amazonaws.datazone#RedshiftRunConfigurationInput": { + "type": "structure", + "members": { + "dataAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data access role included in the configuration details of the Amazon Redshift data\n source.

", + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]{1,128}$" + } + }, + "relationalFilterConfigurations": { + "target": "com.amazonaws.datazone#RelationalFilterConfigurations", + "traits": { + "smithy.api#documentation": "

The relational filter configurations included in the configuration details of the Amazon\n Redshift data source.

", + "smithy.api#required": {} + } + }, + "redshiftCredentialConfiguration": { + "target": "com.amazonaws.datazone#RedshiftCredentialConfiguration", + "traits": { + "smithy.api#required": {} + } + }, + "redshiftStorage": { + "target": "com.amazonaws.datazone#RedshiftStorage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of the Amazon Redshift data source.
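As an illustrative sketch of building this input shape in Swift (assuming Soto renders the RedshiftStorage union as an enum, as it does for other union shapes; every ARN and name below is a placeholder):

import SotoDataZone

// A Redshift run configuration: where the data lives (a cluster), how to
// authenticate (a Secrets Manager secret), and which database/schema to crawl.
func makeRedshiftRunConfiguration() -> DataZone.RedshiftRunConfigurationInput {
    .init(
        dataAccessRole: "arn:aws:iam::111122223333:role/DataZoneRedshiftAccess",
        redshiftCredentialConfiguration: .init(
            secretManagerArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:redshift-creds"
        ),
        redshiftStorage: .redshiftClusterSource(.init(clusterName: "analytics-cluster")),
        relationalFilterConfigurations: [
            .init(databaseName: "dev", schemaName: "public")
        ]
    )
}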

" + } + }, + "com.amazonaws.datazone#RedshiftRunConfigurationOutput": { + "type": "structure", + "members": { + "accountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account included in the configuration details of the Amazon Redshift\n data source.

", + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^\\d{12}$" + } + }, + "region": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services region included in the configuration details of the Amazon Redshift\n data source.

", + "smithy.api#length": { + "min": 4, + "max": 16 + }, + "smithy.api#pattern": "[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]" + } + }, + "dataAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data access role included in the configuration details of the Amazon Redshift data\n source.

", + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]{1,128}$" + } + }, + "relationalFilterConfigurations": { + "target": "com.amazonaws.datazone#RelationalFilterConfigurations", + "traits": { + "smithy.api#documentation": "

The relational filter configurations included in the configuration details of the Amazon\n Redshift data source.

", + "smithy.api#required": {} + } + }, + "redshiftCredentialConfiguration": { + "target": "com.amazonaws.datazone#RedshiftCredentialConfiguration", + "traits": { + "smithy.api#required": {} + } + }, + "redshiftStorage": { + "target": "com.amazonaws.datazone#RedshiftStorage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details of the Amazon Redshift data source.

" + } + }, + "com.amazonaws.datazone#RedshiftServerlessStorage": { + "type": "structure", + "members": { + "workgroupName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon Redshift Serverless workgroup.

", + "smithy.api#length": { + "min": 3, + "max": 64 + }, + "smithy.api#pattern": "^[a-z0-9-]+$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the Amazon Redshift Serverless workgroup storage.

" + } + }, + "com.amazonaws.datazone#RedshiftStorage": { + "type": "union", + "members": { + "redshiftClusterSource": { + "target": "com.amazonaws.datazone#RedshiftClusterStorage", + "traits": { + "smithy.api#documentation": "

The details of the Amazon Redshift cluster source.

" + } + }, + "redshiftServerlessSource": { + "target": "com.amazonaws.datazone#RedshiftServerlessStorage", + "traits": { + "smithy.api#documentation": "

The details of the Amazon Redshift Serverless workgroup source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the Amazon Redshift storage as part of the configuration of an Amazon\n Redshift data source run.

" + } + }, + "com.amazonaws.datazone#RegionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 16 + }, + "smithy.api#pattern": "^[a-z]{2}-?(iso|gov)?-{1}[a-z]*-{1}[0-9]$" + } + }, + "com.amazonaws.datazone#RegionalParameter": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.datazone#RegionalParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#RegionName" + }, + "value": { + "target": "com.amazonaws.datazone#RegionalParameter" + } + }, + "com.amazonaws.datazone#RejectChoice": { + "type": "structure", + "members": { + "predictionTarget": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the target (for example, a column name) where a prediction can be\n rejected.

" + } + }, + "predictionChoices": { + "target": "com.amazonaws.datazone#PredictionChoices", + "traits": { + "smithy.api#documentation": "

Specifies the automatically generated business metadata that can be rejected.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the automatically generated business metadata that is rejected.

" + } + }, + "com.amazonaws.datazone#RejectChoices": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#RejectChoice" + } + }, + "com.amazonaws.datazone#RejectPredictions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#RejectPredictionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#RejectPredictionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Rejects automatically generated business-friendly metadata for your Amazon DataZone\n assets.
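A hedged Swift sketch that rejects every pending prediction on an asset by using the ALL reject rule (domain and asset IDs are placeholders):

import SotoDataZone

// Reject all automatically generated business-name predictions on one asset.
func rejectAllPredictions(dataZone: DataZone, domainId: String, assetId: String) async throws {
    let output = try await dataZone.rejectPredictions(.init(
        domainIdentifier: domainId,
        identifier: assetId,
        rejectRule: .init(rule: .all)
    ))
    print("Rejected predictions on asset \(output.assetId), revision \(output.assetRevision)")
}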

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/assets/{identifier}/reject-predictions" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#RejectPredictionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#AssetIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the prediction.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#httpQuery": "revision" + } + }, + "rejectRule": { + "target": "com.amazonaws.datazone#RejectRule", + "traits": { + "smithy.api#documentation": "

" + } + }, + "rejectChoices": { + "target": "com.amazonaws.datazone#RejectChoices", + "traits": { + "smithy.api#documentation": "

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#RejectPredictionsOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + }, + "assetRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RejectRule": { + "type": "structure", + "members": { + "rule": { + "target": "com.amazonaws.datazone#RejectRuleBehavior", + "traits": { + "smithy.api#documentation": "

Specifies whether you want to reject the top prediction for all targets or none.

" + } + }, + "threshold": { + "target": "smithy.api#Float", + "traits": { + "smithy.api#documentation": "

The confidence score that specifies the condition at which a prediction can be\n rejected.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the rule and the threshold under which a prediction can be rejected.

" + } + }, + "com.amazonaws.datazone#RejectRuleBehavior": { + "type": "enum", + "members": { + "ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.datazone#RejectSubscriptionRequest": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#RejectSubscriptionRequestInput" + }, + "output": { + "target": "com.amazonaws.datazone#RejectSubscriptionRequestOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Rejects the specified subscription request.
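A minimal Swift sketch (the IDs and the comment text are placeholders):

import SotoDataZone

// Reject a pending subscription request and record the reason.
func rejectRequest(dataZone: DataZone, domainId: String, requestId: String) async throws {
    let output = try await dataZone.rejectSubscriptionRequest(.init(
        decisionComment: "Dataset contains PII; request the masked view instead.",
        domainIdentifier: domainId,
        identifier: requestId
    ))
    print("Request \(output.id) is now \(output.status)")
}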

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}/reject" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#RejectSubscriptionRequestInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the subscription request was\n rejected.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request that was rejected.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the rejected subscription request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#RejectSubscriptionRequestOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request that was rejected.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the subscription request was\n rejected.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was rejected.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The subscribed principals of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

The subscribed listings of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request reviewer.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the rejected subscription request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RelationalFilterConfiguration": { + "type": "structure", + "members": { + "databaseName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The database name specified in the relational filter configuration for the data\n source.

", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#required": {} + } + }, + "schemaName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The schema name specified in the relational filter configuration for the data\n source.

", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "filterExpressions": { + "target": "com.amazonaws.datazone#FilterExpressions", + "traits": { + "smithy.api#documentation": "

The filter expressions specified in the relational filter configuration for the data\n source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The relational filter configuration for the data source.

" + } + }, + "com.amazonaws.datazone#RelationalFilterConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#RelationalFilterConfiguration" + } + }, + "com.amazonaws.datazone#RequestReason": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#Resource": { + "type": "structure", + "members": { + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of a provisioned resource of this Amazon DataZone environment.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of a provisioned resource of this Amazon DataZone environment.

" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of a provisioned resource of this Amazon DataZone environment.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of a provisioned resource of this Amazon DataZone environment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a provisioned resource of this Amazon DataZone environment.

" + } + }, + "com.amazonaws.datazone#ResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#Resource" + } + }, + "com.amazonaws.datazone#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The specified resource cannot be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.datazone#Revision": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.datazone#RevokeSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#RevokeSubscriptionInput" + }, + "output": { + "target": "com.amazonaws.datazone#RevokeSubscriptionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Revokes a specified subscription in Amazon DataZone.
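A minimal Swift sketch that revokes a subscription while retaining already-granted permissions (IDs are placeholders):

import SotoDataZone

// Revoke a subscription but leave existing grants in place.
func revoke(dataZone: DataZone, domainId: String, subscriptionId: String) async throws {
    let output = try await dataZone.revokeSubscription(.init(
        domainIdentifier: domainId,
        identifier: subscriptionId,
        retainPermissions: true
    ))
    print("Subscription \(output.id) status: \(output.status)")
}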

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/subscriptions/{identifier}/revoke" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#RevokeSubscriptionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where you want to revoke a subscription.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the revoked subscription.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether permissions are retained when the subscription is revoked.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#RevokeSubscriptionOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who revoked the subscription.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain where you want to revoke a subscription.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was revoked.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "

The subscribed principal of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "

The subscribed listing of the revoked subscription.

", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request for the revoked subscription.

" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether permissions are retained when the subscription is revoked.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#RoleArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$" + } + }, + "com.amazonaws.datazone#RunStatisticsForAssets": { + "type": "structure", + "members": { + "added": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The added statistic for the data source run.

" + } + }, + "updated": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The updated statistic for the data source run.

" + } + }, + "unchanged": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The unchanged statistic for the data source run.

" + } + }, + "skipped": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The skipped statistic for the data source run.

" + } + }, + "failed": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The failed statistic for the data source run.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The asset statistics from the data source run.

" + } + }, + "com.amazonaws.datazone#ScheduleConfiguration": { + "type": "structure", + "members": { + "timezone": { + "target": "com.amazonaws.datazone#Timezone", + "traits": { + "smithy.api#documentation": "

The timezone of the data source run.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#CronString", + "traits": { + "smithy.api#documentation": "

The schedule of the data source runs.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the schedule of the data source runs.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#Search": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#SearchInput" + }, + "output": { + "target": "com.amazonaws.datazone#SearchOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches for assets in Amazon DataZone.
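A hedged Swift sketch, assuming InventorySearchScope exposes an .asset case (the domain ID and search text are placeholders):

import SotoDataZone

// Full-text search over a domain's asset inventory, limited to 10 results.
func searchAssets(dataZone: DataZone, domainId: String) async throws {
    let output = try await dataZone.search(.init(
        domainIdentifier: domainId,
        maxResults: 10,
        searchScope: .asset,
        searchText: "customer churn"
    ))
    print("Matched \((output.items ?? []).count) inventory items")
}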

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/search" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + } + } + }, + "com.amazonaws.datazone#SearchGroupProfiles": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#SearchGroupProfilesInput" + }, + "output": { + "target": "com.amazonaws.datazone#SearchGroupProfilesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches group profiles in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/search-group-profiles" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#SearchGroupProfilesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which you want to search group\n profiles.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "groupType": { + "target": "com.amazonaws.datazone#GroupSearchType", + "traits": { + "smithy.api#documentation": "

The group type for which to search.

", + "smithy.api#required": {} + } + }, + "searchText": { + "target": "com.amazonaws.datazone#GroupSearchText", + "traits": { + "smithy.api#documentation": "

Specifies the text for which to search.

" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call to\n SearchGroupProfiles. When the number of results to be listed is greater\n than the value of MaxResults, the response contains a NextToken\n value that you can use in a subsequent call to SearchGroupProfiles to list the\n next set of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchGroupProfiles to list the next set of\n results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#SearchGroupProfilesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#GroupProfileSummaries", + "traits": { + "smithy.api#documentation": "

The results of the SearchGroupProfiles action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchGroupProfiles to list the next set of\n results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#SearchInItem": { + "type": "structure", + "members": { + "attribute": { + "target": "com.amazonaws.datazone#Attribute", + "traits": { + "smithy.api#documentation": "

The search attribute.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the search.

" + } + }, + "com.amazonaws.datazone#SearchInList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SearchInItem" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.datazone#SearchInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "owningProjectIdentifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the owning project specified for the search.

" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call to Search. When\n the number of results to be listed is greater than the value of MaxResults,\n the response contains a NextToken value that you can use in a subsequent call\n to Search to list the next set of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to Search to list the next set of results.

" + } + }, + "searchScope": { + "target": "com.amazonaws.datazone#InventorySearchScope", + "traits": { + "smithy.api#documentation": "

The scope of the search.

", + "smithy.api#required": {} + } + }, + "searchText": { + "target": "com.amazonaws.datazone#SearchText", + "traits": { + "smithy.api#documentation": "

Specifies the text for which to search.

" + } + }, + "searchIn": { + "target": "com.amazonaws.datazone#SearchInList", + "traits": { + "smithy.api#documentation": "

" + } + }, + "filters": { + "target": "com.amazonaws.datazone#FilterClause", + "traits": { + "smithy.api#documentation": "

Specifies the search filters.

" + } + }, + "sort": { + "target": "com.amazonaws.datazone#SearchSort", + "traits": { + "smithy.api#documentation": "

Specifies the way in which the search results are to be sorted.

" + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#SearchOutputAdditionalAttributes", + "traits": { + "smithy.api#documentation": "

Specifies additional attributes for the Search action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#SearchInventoryResultItem": { + "type": "union", + "members": { + "glossaryItem": { + "target": "com.amazonaws.datazone#GlossaryItem", + "traits": { + "smithy.api#documentation": "

The glossary item included in the search results.

" + } + }, + "glossaryTermItem": { + "target": "com.amazonaws.datazone#GlossaryTermItem", + "traits": { + "smithy.api#documentation": "

The glossary term item included in the search results.

" + } + }, + "assetItem": { + "target": "com.amazonaws.datazone#AssetItem", + "traits": { + "smithy.api#documentation": "

The asset item included in the search results.

" + } + }, + "dataProductItem": { + "target": "com.amazonaws.datazone#DataProductSummary", + "traits": { + "smithy.api#documentation": "

The data product item included in the search results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the search results.

" + } + }, + "com.amazonaws.datazone#SearchInventoryResultItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SearchInventoryResultItem" + } + }, + "com.amazonaws.datazone#SearchListings": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#SearchListingsInput" + }, + "output": { + "target": "com.amazonaws.datazone#SearchListingsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches listings in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/listings/search" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + } + } + }, + "com.amazonaws.datazone#SearchListingsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which to search listings.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "searchText": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the text for which to search.

" + } + }, + "searchIn": { + "target": "com.amazonaws.datazone#SearchInList", + "traits": { + "smithy.api#documentation": "

" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call to SearchListings.\n When the number of results to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to SearchListings to list the next set of\n results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchListings to list the next set of\n results.

" + } + }, + "filters": { + "target": "com.amazonaws.datazone#FilterClause", + "traits": { + "smithy.api#documentation": "

Specifies the filters for the search of listings.

" + } + }, + "sort": { + "target": "com.amazonaws.datazone#SearchSort", + "traits": { + "smithy.api#documentation": "

Specifies the way to sort the search results.

" + } + }, + "additionalAttributes": { + "target": "com.amazonaws.datazone#SearchOutputAdditionalAttributes", + "traits": { + "smithy.api#documentation": "

Specifies additional attributes for the search.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#SearchListingsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SearchResultItems", + "traits": { + "smithy.api#documentation": "

The results of the SearchListings action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchListings to list the next set of\n results.

" + } + }, + "totalMatchCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Total number of search results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#SearchOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SearchInventoryResultItems", + "traits": { + "smithy.api#documentation": "

The results of the Search action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to Search to list the next set of results.

" + } + }, + "totalMatchCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Total number of search results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#SearchOutputAdditionalAttribute": { + "type": "enum", + "members": { + "FORMS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORMS" + } + } + } + }, + "com.amazonaws.datazone#SearchOutputAdditionalAttributes": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SearchOutputAdditionalAttribute" + }, + "traits": { + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.datazone#SearchResultItem": { + "type": "union", + "members": { + "assetListing": { + "target": "com.amazonaws.datazone#AssetListingItem", + "traits": { + "smithy.api#documentation": "

The asset listing included in the results of the SearchListings\n action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the results of the SearchListings action.

" + } + }, + "com.amazonaws.datazone#SearchResultItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SearchResultItem" + } + }, + "com.amazonaws.datazone#SearchSort": { + "type": "structure", + "members": { + "attribute": { + "target": "com.amazonaws.datazone#Attribute", + "traits": { + "smithy.api#documentation": "

The attribute detail of the way to sort search results.

", + "smithy.api#required": {} + } + }, + "order": { + "target": "com.amazonaws.datazone#SortOrder", + "traits": { + "smithy.api#documentation": "

The order detail of the way to sort search results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the way to sort search results.

" + } + }, + "com.amazonaws.datazone#SearchText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + } + } + }, + "com.amazonaws.datazone#SearchTypes": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#SearchTypesInput" + }, + "output": { + "target": "com.amazonaws.datazone#SearchTypesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches for types in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/types-search" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + } + } + }, + "com.amazonaws.datazone#SearchTypesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to invoke the SearchTypes\n action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call to SearchTypes.\n When the number of results to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to SearchTypes to list the next set of results.\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchTypes to list the next set of\n results.

" + } + }, + "searchScope": { + "target": "com.amazonaws.datazone#TypesSearchScope", + "traits": { + "smithy.api#documentation": "

Specifies the scope of the search for types.

", + "smithy.api#required": {} + } + }, + "searchText": { + "target": "com.amazonaws.datazone#SearchText", + "traits": { + "smithy.api#documentation": "

Specifies the text for which to search.

" + } + }, + "searchIn": { + "target": "com.amazonaws.datazone#SearchInList", + "traits": { + "smithy.api#documentation": "

" + } + }, + "filters": { + "target": "com.amazonaws.datazone#FilterClause", + "traits": { + "smithy.api#documentation": "

The filters for the SearchTypes action.

" + } + }, + "sort": { + "target": "com.amazonaws.datazone#SearchSort", + "traits": { + "smithy.api#documentation": "

Specifies the way to sort the SearchTypes results.

" + } + }, + "managed": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#SearchTypesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#SearchTypesResultItems", + "traits": { + "smithy.api#documentation": "

The results of the SearchTypes action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchTypes to list the next set of\n results.

" + } + }, + "totalMatchCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Total number of search results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#SearchTypesResultItem": { + "type": "union", + "members": { + "assetTypeItem": { + "target": "com.amazonaws.datazone#AssetTypeItem", + "traits": { + "smithy.api#documentation": "

The asset type included in the results of the SearchTypes action.

" + } + }, + "formTypeItem": { + "target": "com.amazonaws.datazone#FormTypeData", + "traits": { + "smithy.api#documentation": "

The form type included in the results of the SearchTypes action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the results of the SearchTypes action.

" + } + }, + "com.amazonaws.datazone#SearchTypesResultItems": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SearchTypesResultItem" + } + }, + "com.amazonaws.datazone#SearchUserProfiles": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#SearchUserProfilesInput" + }, + "output": { + "target": "com.amazonaws.datazone#SearchUserProfilesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches user profiles in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/search-user-profiles" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#SearchUserProfilesInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which you want to search user\n profiles.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userType": { + "target": "com.amazonaws.datazone#UserSearchType", + "traits": { + "smithy.api#documentation": "

Specifies the user type for the SearchUserProfiles action.

", + "smithy.api#required": {} + } + }, + "searchText": { + "target": "com.amazonaws.datazone#UserSearchText", + "traits": { + "smithy.api#documentation": "

Specifies the text for which to search.

" + } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in a single call to\n SearchUserProfiles. When the number of results to be listed is greater than\n the value of MaxResults, the response contains a NextToken value\n that you can use in a subsequent call to SearchUserProfiles to list the next\n set of results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchUserProfiles to list the next set of\n results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#SearchUserProfilesOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#UserProfileSummaries", + "traits": { + "smithy.api#documentation": "

The results of the SearchUserProfiles action.

" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of results is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of results, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to SearchUserProfiles to list the next set of\n results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request has exceeded the specified service quota.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.datazone#ShortDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1024 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#SingleSignOn": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.datazone#AuthType", + "traits": { + "smithy.api#documentation": "

The type of single sign-on in Amazon DataZone.

" + } + }, + "userAssignment": { + "target": "com.amazonaws.datazone#UserAssignment", + "traits": { + "smithy.api#documentation": "

The single sign-on user assignment in Amazon DataZone.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The single sign-on details in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#Smithy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10000 + } + } + }, + "com.amazonaws.datazone#SortFieldProject": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NAME" + } + } + } + }, + "com.amazonaws.datazone#SortKey": { + "type": "enum", + "members": { + "CREATED_AT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED_AT" + } + }, + "UPDATED_AT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATED_AT" + } + } + } + }, + "com.amazonaws.datazone#SortOrder": { + "type": "enum", + "members": { + "ASCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASCENDING" + } + }, + "DESCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DESCENDING" + } + } + } + }, + "com.amazonaws.datazone#SsoUserProfileDetails": { + "type": "structure", + "members": { + "username": { + "target": "com.amazonaws.datazone#UserProfileName", + "traits": { + "smithy.api#documentation": "

The username included in the single sign-on details of the user profile.

" + } + }, + "firstName": { + "target": "com.amazonaws.datazone#FirstName", + "traits": { + "smithy.api#documentation": "

The first name included in the single sign-on details of the user profile.

" + } + }, + "lastName": { + "target": "com.amazonaws.datazone#LastName", + "traits": { + "smithy.api#documentation": "

The last name included in the single sign-on details of the user profile.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The single sign-on details of the user profile.

" + } + }, + "com.amazonaws.datazone#StartDataSourceRun": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#StartDataSourceRunInput" + }, + "output": { + "target": "com.amazonaws.datazone#StartDataSourceRunOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Starts the run of the specified data source in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/data-sources/{dataSourceIdentifier}/runs" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#StartDataSourceRunInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to start a data source run.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceIdentifier": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#StartDataSourceRunOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which to start a data source run.

", + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#DataSourceRunId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source run.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source run.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceRunType", + "traits": { + "smithy.api#documentation": "

The type of the data source run.

", + "smithy.api#required": {} + } + }, + "dataSourceConfigurationSnapshot": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The configuration snapshot of the data source that is being run.

" + } + }, + "runStatisticsForAssets": { + "target": "com.amazonaws.datazone#RunStatisticsForAssets", + "traits": { + "smithy.api#documentation": "

Specifies run statistics for assets.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was updated.

", + "smithy.api#required": {} + } + }, + "startedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was started.

" + } + }, + "stoppedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source run was stopped.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#SubscribedAsset": { + "type": "structure", + "members": { + "assetId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the asset for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "assetRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the asset for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantStatus", + "traits": { + "smithy.api#documentation": "

The status of the asset for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "targetName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The target name of the asset for which the subscription grant is created.

" + } + }, + "failureCause": { + "target": "com.amazonaws.datazone#FailureCause", + "traits": { + "smithy.api#documentation": "

The failure cause included in the details of the asset for which the subscription grant\n is created.

" + } + }, + "grantedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant to the asset is created.

" + } + }, + "failureTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The failure timestamp included in the details of the asset for which the subscription\n grant is created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the asset for which the subscription grant is created.

" + } + }, + "com.amazonaws.datazone#SubscribedAssetListing": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the published asset for which the subscription grant is\n created.

" + } + }, + "entityRevision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the published asset for which the subscription grant is created.

" + } + }, + "entityType": { + "target": "com.amazonaws.datazone#TypeName", + "traits": { + "smithy.api#documentation": "

The type of the published asset for which the subscription grant is created.

" + } + }, + "forms": { + "target": "com.amazonaws.datazone#Forms", + "traits": { + "smithy.api#documentation": "

The forms attached to the published asset for which the subscription grant is\n created.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#DetailedGlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms attached to the published asset for which the subscription grant is\n created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the published asset for which the subscription grant is created.

" + } + }, + "com.amazonaws.datazone#SubscribedAssets": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscribedAsset" + } + }, + "com.amazonaws.datazone#SubscribedListing": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the published asset for which the subscription grant is\n created.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.datazone#Revision", + "traits": { + "smithy.api#documentation": "

The revision of the published asset for which the subscription grant is created.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#ListingName", + "traits": { + "smithy.api#documentation": "

The name of the published asset for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the published asset for which the subscription grant is\n created.

", + "smithy.api#required": {} + } + }, + "item": { + "target": "com.amazonaws.datazone#SubscribedListingItem", + "traits": { + "smithy.api#documentation": "

The published asset for which the subscription grant is created.

", + "smithy.api#required": {} + } + }, + "ownerProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project of the published asset for which the subscription grant is\n created.

", + "smithy.api#required": {} + } + }, + "ownerProjectName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the project that owns the published asset for which the subscription grant\n is created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the published asset for which the subscription grant is created.

" + } + }, + "com.amazonaws.datazone#SubscribedListingInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#ListingId", + "traits": { + "smithy.api#documentation": "

The identifier of the published asset for which the subscription grant is to be\n created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The published asset for which the subscription grant is to be created.

" + } + }, + "com.amazonaws.datazone#SubscribedListingInputs": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscribedListingInput" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.datazone#SubscribedListingItem": { + "type": "union", + "members": { + "assetListing": { + "target": "com.amazonaws.datazone#SubscribedAssetListing", + "traits": { + "smithy.api#documentation": "

The asset for which the subscription grant is created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The published asset for which the subscription grant is created.

" + } + }, + "com.amazonaws.datazone#SubscribedListings": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscribedListing" + } + }, + "com.amazonaws.datazone#SubscribedPrincipal": { + "type": "union", + "members": { + "project": { + "target": "com.amazonaws.datazone#SubscribedProject", + "traits": { + "smithy.api#documentation": "

The project that has the subscription grant.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The principal that has the subscription grant for the asset.

" + } + }, + "com.amazonaws.datazone#SubscribedPrincipalInput": { + "type": "union", + "members": { + "project": { + "target": "com.amazonaws.datazone#SubscribedProjectInput", + "traits": { + "smithy.api#documentation": "

The project that is to be given a subscription grant.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The principal that is to be given a subscription grant.

" + } + }, + "com.amazonaws.datazone#SubscribedPrincipalInputs": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscribedPrincipalInput" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.datazone#SubscribedPrincipals": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscribedPrincipal" + } + }, + "com.amazonaws.datazone#SubscribedProject": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that has the subscription grant.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the project that has the subscription grant.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The project that has the subscription grant.

" + } + }, + "com.amazonaws.datazone#SubscribedProjectInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that is to be given a subscription grant.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The project that is to be given a subscription grant.

" + } + }, + "com.amazonaws.datazone#SubscriptionGrantId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#SubscriptionGrantOverallStatus": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "GRANT_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_FAILED" + } + }, + "REVOKE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_FAILED" + } + }, + "GRANT_AND_REVOKE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_AND_REVOKE_FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "INACCESSIBLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACCESSIBLE" + } + } + } + }, + "com.amazonaws.datazone#SubscriptionGrantStatus": { + "type": "enum", + "members": { + "GRANT_PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_PENDING" + } + }, + "REVOKE_PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_PENDING" + } + }, + "GRANT_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_IN_PROGRESS" + } + }, + "REVOKE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_IN_PROGRESS" + } + }, + "GRANTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANTED" + } + }, + "REVOKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKED" + } + }, + "GRANT_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_FAILED" + } + }, + "REVOKE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_FAILED" + } + } + } + }, + "com.amazonaws.datazone#SubscriptionGrantSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription grant.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription grant.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription grant exists.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when a subscription grant was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant was updated.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The identifier of the target of the subscription grant.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", + "traits": { + "smithy.api#documentation": "

The entity to which the subscription is granted.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription grant.

", + "smithy.api#required": {} + } + }, + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", + "traits": { + "smithy.api#documentation": "

The assets included in the subscription grant.

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the subscription grant.

" + } + }, + "com.amazonaws.datazone#SubscriptionGrants": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscriptionGrantSummary" + } + }, + "com.amazonaws.datazone#SubscriptionId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#SubscriptionRequestId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#SubscriptionRequestStatus": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING" + } + }, + "ACCEPTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCEPTED" + } + }, + "REJECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REJECTED" + } + } + } + }, + "com.amazonaws.datazone#SubscriptionRequestSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription request exists.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when a subscription request was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the subscription request.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The principals included in the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

The listings included in the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request reviewer.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the subscription request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the subscription request.

" + } + }, + "com.amazonaws.datazone#SubscriptionRequests": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscriptionRequestSummary" + } + }, + "com.amazonaws.datazone#SubscriptionStatus": { + "type": "enum", + "members": { + "APPROVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPROVED" + } + }, + "REVOKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKED" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + } + } + }, + "com.amazonaws.datazone#SubscriptionSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription exists.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription was updated.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipal": { + "target": "com.amazonaws.datazone#SubscribedPrincipal", + "traits": { + "smithy.api#documentation": "

The principal included in the subscription.

", + "smithy.api#required": {} + } + }, + "subscribedListing": { + "target": "com.amazonaws.datazone#SubscribedListing", + "traits": { + "smithy.api#documentation": "

The listing included in the subscription.

", + "smithy.api#required": {} + } + }, + "subscriptionRequestId": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request for the subscription.

" + } + }, + "retainPermissions": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The retain permissions included in the subscription.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the subscription.

" + } + }, + "com.amazonaws.datazone#SubscriptionTargetForm": { + "type": "structure", + "members": { + "formName": { + "target": "com.amazonaws.datazone#FormName", + "traits": { + "smithy.api#documentation": "

The form name included in the subscription target configuration.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The content of the subscription target configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the subscription target configuration.

" + } + }, + "com.amazonaws.datazone#SubscriptionTargetForms": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscriptionTargetForm" + } + }, + "com.amazonaws.datazone#SubscriptionTargetId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#SubscriptionTargetName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#SubscriptionTargetSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription target.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals included in the subscription target.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the subscription target exists.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project specified in the subscription target.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment of the subscription target.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name of the subscription target.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type of the subscription target.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role specified in the subscription target.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The asset types included in the subscription target.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration of the subscription target.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider of the subscription target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the subscription target.

" + } + }, + "com.amazonaws.datazone#SubscriptionTargets": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscriptionTargetSummary" + } + }, + "com.amazonaws.datazone#Subscriptions": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SubscriptionSummary" + } + }, + "com.amazonaws.datazone#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w \\.:/=+@-]+$" + } + }, + "com.amazonaws.datazone#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#TagKey" + } + }, + "com.amazonaws.datazone#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.datazone#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Tags a resource in Amazon DataZone.

", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn}" + } + } + }, + "com.amazonaws.datazone#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource to be tagged in Amazon DataZone.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.datazone#Tags", + "traits": { + "smithy.api#documentation": "

Specifies the tags for the TagResource action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^[\\w \\.:/=+@-]*$" + } + }, + "com.amazonaws.datazone#Tags": { + "type": "map", + "key": { + "target": "com.amazonaws.datazone#TagKey" + }, + "value": { + "target": "com.amazonaws.datazone#TagValue" + } + }, + "com.amazonaws.datazone#TaskId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, + "com.amazonaws.datazone#TaskStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACTIVE" + } + } + } + }, + "com.amazonaws.datazone#TermRelations": { + "type": "structure", + "members": { + "isA": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The isA property of the term relations.

", + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "classifies": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The classifies property of the term relations.

", + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the term relations.

" + } + }, + "com.amazonaws.datazone#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request was denied due to request throttling.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.datazone#Timezone": { + "type": "enum", + "members": { + "UTC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UTC" + } + }, + "AFRICA_JOHANNESBURG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AFRICA_JOHANNESBURG" + } + }, + "AMERICA_MONTREAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMERICA_MONTREAL" + } + }, + "AMERICA_SAO_PAULO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMERICA_SAO_PAULO" + } + }, + "ASIA_BAHRAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_BAHRAIN" + } + }, + "ASIA_BANGKOK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_BANGKOK" + } + }, + "ASIA_CALCUTTA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_CALCUTTA" + } + }, + "ASIA_DUBAI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_DUBAI" + } + }, + "ASIA_HONG_KONG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_HONG_KONG" + } + }, + "ASIA_JAKARTA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_JAKARTA" + } + }, + "ASIA_KUALA_LUMPUR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_KUALA_LUMPUR" + } + }, + "ASIA_SEOUL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_SEOUL" + } + }, + "ASIA_SHANGHAI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_SHANGHAI" + } + }, + "ASIA_SINGAPORE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_SINGAPORE" + } + }, + "ASIA_TAIPEI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_TAIPEI" + } + }, + "ASIA_TOKYO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASIA_TOKYO" + } + }, + "AUSTRALIA_MELBOURNE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUSTRALIA_MELBOURNE" + } + }, + "AUSTRALIA_SYDNEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUSTRALIA_SYDNEY" + } + }, + "CANADA_CENTRAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANADA_CENTRAL" + } + }, + "CET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CET" + } + }, + "CST6CDT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CST6CDT" + } + }, + "ETC_GMT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT" + } + }, + "ETC_GMT0": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT0" + } + }, + "ETC_GMT_ADD_0": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_0" + } + }, + "ETC_GMT_ADD_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_1" + } + }, + "ETC_GMT_ADD_10": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_10" + } + }, + "ETC_GMT_ADD_11": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_11" + } + }, + "ETC_GMT_ADD_12": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_12" + } + }, + "ETC_GMT_ADD_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_2" + } + }, + "ETC_GMT_ADD_3": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_3" + } + }, + "ETC_GMT_ADD_4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_4" + } + }, + "ETC_GMT_ADD_5": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_5" + } + }, + "ETC_GMT_ADD_6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_6" + } + }, + "ETC_GMT_ADD_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_7" + } + }, + "ETC_GMT_ADD_8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_8" + } + }, + "ETC_GMT_ADD_9": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_ADD_9" + } + }, + "ETC_GMT_NEG_0": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_0" + } + }, + "ETC_GMT_NEG_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_1" + } + }, + "ETC_GMT_NEG_10": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_10" + } + }, + "ETC_GMT_NEG_11": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_11" + } + }, + "ETC_GMT_NEG_12": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_12" + } + }, + "ETC_GMT_NEG_13": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_13" + } + }, + "ETC_GMT_NEG_14": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_14" + } + }, + "ETC_GMT_NEG_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_2" + } + }, + "ETC_GMT_NEG_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_3" + } + }, + "ETC_GMT_NEG_4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_4" + } + }, + "ETC_GMT_NEG_5": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_5" + } + }, + "ETC_GMT_NEG_6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_6" + } + }, + "ETC_GMT_NEG_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_7" + } + }, + "ETC_GMT_NEG_8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_8" + } + }, + "ETC_GMT_NEG_9": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ETC_GMT_NEG_9" + } + }, + "EUROPE_DUBLIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EUROPE_DUBLIN" + } + }, + "EUROPE_LONDON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EUROPE_LONDON" + } + }, + "EUROPE_PARIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EUROPE_PARIS" + } + }, + "EUROPE_STOCKHOLM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EUROPE_STOCKHOLM" + } + }, + "EUROPE_ZURICH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EUROPE_ZURICH" + } + }, + "ISRAEL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ISRAEL" + } + }, + "MEXICO_GENERAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEXICO_GENERAL" + } + }, + "MST7MDT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MST7MDT" + } + }, + "PACIFIC_AUCKLAND": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "PACIFIC_AUCKLAND" + } + }, + "US_CENTRAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_CENTRAL" + } + }, + "US_EASTERN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_EASTERN" + } + }, + "US_MOUNTAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_MOUNTAIN" + } + }, + "US_PACIFIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_PACIFIC" + } + } + } + }, + "com.amazonaws.datazone#Title": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1000 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#Topic": { + "type": "structure", + "members": { + "subject": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The subject of the resource mentioned in a notification.

", + "smithy.api#required": {} + } + }, + "resource": { + "target": "com.amazonaws.datazone#NotificationResource", + "traits": { + "smithy.api#required": {} + } + }, + "role": { + "target": "com.amazonaws.datazone#NotificationRole", + "traits": { + "smithy.api#documentation": "

The role of the resource mentioned in a notification.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The topic of the notification.

" + } + }, + "com.amazonaws.datazone#TypeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[^\\.]*" + } + }, + "com.amazonaws.datazone#TypesSearchScope": { + "type": "enum", + "members": { + "ASSET_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSET_TYPE" + } + }, + "FORM_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORM_TYPE" + } + } + } + }, + "com.amazonaws.datazone#UnauthorizedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

You do not have permission to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 401 + } + }, + "com.amazonaws.datazone#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.datazone#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Untags a resource in Amazon DataZone.
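For orientation, a minimal sketch of calling this operation through the generated Soto client. It assumes the usual Soto 6.x surface (async method named untagResource, request shape DataZone.UntagResourceRequest with alphabetically ordered parameters); the ARN and tag keys are placeholders, not values from this model:

import SotoDataZone

func untagExample() async throws {
    // AWSClient owns the HTTP connection pool and the credential chain.
    let client = AWSClient(credentialProvider: .default, httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let dataZone = DataZone(client: client, region: .useast1)

    // resourceArn fills the {resourceArn} path label; tagKeys is sent as the "tagKeys" query string.
    let request = DataZone.UntagResourceRequest(
        resourceArn: "arn:aws:datazone:us-east-1:111122223333:domain/dzd_example", // placeholder ARN
        tagKeys: ["team", "cost-center"]
    )
    _ = try await dataZone.untagResource(request) // response body is empty
}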

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the resource to be untagged in Amazon DataZone.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.datazone#TagKeyList", + "traits": { + "smithy.api#documentation": "

Specifies the tag keys for the UntagResource action.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateDataSource": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateDataSourceInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateDataSourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified data source in Amazon DataZone.
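Because this is an HTTP PATCH, only the members supplied in the input are changed. A hedged sketch, assuming the generated input shape is DataZone.UpdateDataSourceInput and that EnableSetting exposes an .enabled case (Soto lowercases the model's "ENABLED" value); the domain and data source IDs are placeholders:

import SotoDataZone

func updateDataSourceExample(dataZone: DataZone) async throws {
    // Omitted members keep their current values; only these four are patched.
    let input = DataZone.UpdateDataSourceInput(
        domainIdentifier: "dzd_example",   // placeholder domain ID
        enableSetting: .enabled,           // assumed case name for "ENABLED"
        identifier: "ds_example",          // placeholder data source ID
        publishOnImport: true
    )
    let output = try await dataZone.updateDataSource(input)
    print("updated data source \(output.id): \(output.name)")
}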

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/data-sources/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateDataSourceInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which to update a data source.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateDataSource action.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateDataSource\n action.

" + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

The enable setting to be updated as part of the UpdateDataSource\n action.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The publish on import setting to be updated as part of the UpdateDataSource\n action.

" + } + }, + "assetFormsInput": { + "target": "com.amazonaws.datazone#FormInputList", + "traits": { + "smithy.api#documentation": "

The asset forms to be updated as part of the UpdateDataSource\n action.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule to be updated as part of the UpdateDataSource action.

" + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationInput", + "traits": { + "smithy.api#documentation": "

The configuration to be updated as part of the UpdateDataSource\n action.

", + "smithy.api#notProperty": {} + } + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", + "traits": { + "smithy.api#documentation": "

The recommendation to be updated as part of the UpdateDataSource\n action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateDataSourceOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DataSourceId", + "traits": { + "smithy.api#documentation": "

The identifier of the data source to be updated.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateDataSource action.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#DataSourceType", + "traits": { + "smithy.api#documentation": "

The type to be updated as part of the UpdateDataSource action.

" + } + }, + "name": { + "target": "com.amazonaws.datazone#Name", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateDataSource action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateDataSource\n action.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a data source is to be updated.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project where the data source is to be updated.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment in which a data source is to be updated.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.datazone#DataSourceConfigurationOutput", + "traits": { + "smithy.api#documentation": "

The configuration to be updated as part of the UpdateDataSource\n action.

", + "smithy.api#notProperty": {} + } + }, + "recommendation": { + "target": "com.amazonaws.datazone#RecommendationConfiguration", + "traits": { + "smithy.api#documentation": "

The recommendation to be updated as part of the UpdateDataSource\n action.

" + } + }, + "enableSetting": { + "target": "com.amazonaws.datazone#EnableSetting", + "traits": { + "smithy.api#documentation": "

The enable setting to be updated as part of the UpdateDataSource\n action.

" + } + }, + "publishOnImport": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The publish on import setting to be updated as part of the UpdateDataSource\n action.

" + } + }, + "assetFormsOutput": { + "target": "com.amazonaws.datazone#FormOutputList", + "traits": { + "smithy.api#documentation": "

The asset forms to be updated as part of the UpdateDataSource\n action.

" + } + }, + "schedule": { + "target": "com.amazonaws.datazone#ScheduleConfiguration", + "traits": { + "smithy.api#documentation": "

The schedule to be updated as part of the UpdateDataSource action.

" + } + }, + "lastRunStatus": { + "target": "com.amazonaws.datazone#DataSourceRunStatus", + "traits": { + "smithy.api#documentation": "

The last run status of the data source.

" + } + }, + "lastRunAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was last run.

" + } + }, + "lastRunErrorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

The last run error message of the data source.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.datazone#DataSourceErrorMessage", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was created.

" + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#DateTime", + "traits": { + "smithy.api#documentation": "

The timestamp of when the data source was updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateDomain": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateDomainInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateDomainOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an Amazon DataZone domain.
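UpdateDomain carries a clientToken member with the idempotencyToken trait, so retrying the same logical update is safe; Soto's generated initializer normally defaults that token for you. A sketch under those assumptions, with placeholder IDs:

import SotoDataZone

func renameDomainExample(dataZone: DataZone) async throws {
    // clientToken is omitted here; the generated initializer supplies an
    // idempotency token, so a retried call does not apply the update twice.
    let input = DataZone.UpdateDomainInput(
        description: "Central analytics domain",
        identifier: "dzd_example",   // placeholder domain ID
        name: "analytics-domain"
    )
    let output = try await dataZone.updateDomain(input)
    print("domain \(output.id) renamed to \(output.name ?? "")")
}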

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{identifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#UpdateDomainInput": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateDomain action.

" + } + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", + "traits": { + "smithy.api#documentation": "

The single sign-on option to be updated as part of the UpdateDomain\n action.

" + } + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The domain execution role to be updated as part of the UpdateDomain\n action.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateDomain action.

" + } + }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateDomainOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateDomain action.

" + } + }, + "singleSignOn": { + "target": "com.amazonaws.datazone#SingleSignOn", + "traits": { + "smithy.api#documentation": "

The single sign-on option of the Amazon DataZone domain.

" + } + }, + "domainExecutionRole": { + "target": "com.amazonaws.datazone#RoleArn", + "traits": { + "smithy.api#documentation": "

The domain execution role to be updated as part of the UpdateDomain\n action.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateDomain action.

" + } + }, + "lastUpdatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

Specifies the timestamp of when the domain was last updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateEnvironment": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateEnvironmentInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateEnvironmentOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified environment in Amazon DataZone.
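glossaryTerms on this operation is a list of glossary-term IDs to associate with the environment. A hedged sketch of patching just that list plus the description, assuming the generated DataZone.UpdateEnvironmentInput shape; all IDs are placeholders:

import SotoDataZone

func tagEnvironmentWithTermsExample(dataZone: DataZone) async throws {
    // PATCH semantics: members not supplied here are left untouched.
    let input = DataZone.UpdateEnvironmentInput(
        description: "Production lakehouse environment",
        domainIdentifier: "dzd_example",                  // placeholder domain ID
        glossaryTerms: ["gt_example_1", "gt_example_2"],  // placeholder glossary-term IDs
        identifier: "env_example"                         // placeholder environment ID
    )
    let output = try await dataZone.updateEnvironment(input)
    print("environment \(output.name) in project \(output.projectId)")
}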

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/environments/{identifier}" + } + } + }, + "com.amazonaws.datazone#UpdateEnvironmentInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which the environment is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateEnvironment action.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms to be updated as part of the UpdateEnvironment\n action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateEnvironmentOutput": { + "type": "structure", + "members": { + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The project identifier of the environment.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment that is to be updated.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the domain in which the environment is to be updated.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the environment.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateEnvironment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "environmentProfileId": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The profile identifier of the environment.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services account in which the environment is to be\n updated.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region in which the environment is updated.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider identifier of the environment.

", + "smithy.api#required": {} + } + }, + "provisionedResources": { + "target": "com.amazonaws.datazone#ResourceList", + "traits": { + "smithy.api#documentation": "

The provisioned resources to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#EnvironmentStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateEnvironment action.

" + } + }, + "environmentActions": { + "target": "com.amazonaws.datazone#EnvironmentActionList", + "traits": { + "smithy.api#documentation": "

The environment actions to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "lastDeployment": { + "target": "com.amazonaws.datazone#Deployment", + "traits": { + "smithy.api#documentation": "

The last deployment of the environment.

" + } + }, + "provisioningProperties": { + "target": "com.amazonaws.datazone#ProvisioningProperties", + "traits": { + "smithy.api#documentation": "

The provisioning properties to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "deploymentProperties": { + "target": "com.amazonaws.datazone#DeploymentProperties", + "traits": { + "smithy.api#documentation": "

The deployment properties to be updated as part of the UpdateEnvironment\n action.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The blueprint identifier of the environment.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateEnvironmentProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateEnvironmentProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateEnvironmentProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified environment profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/environment-profiles/{identifier}" + } + } + }, + "com.amazonaws.datazone#UpdateEnvironmentProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which an environment profile is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateEnvironmentProfile\n action.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateEnvironmentProfile\n action.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#EnvironmentParametersList", + "traits": { + "smithy.api#documentation": "

The user parameters to be updated as part of the UpdateEnvironmentProfile\n action.

" + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account in which a specified environment profile is to be\n updated.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region in which a specified environment profile is to be\n updated.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateEnvironmentProfileOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#EnvironmentProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment profile that is to be updated.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which the environment profile is to be\n updated.

", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.datazone#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account in which a specified environment profile is to be\n updated.

" + } + }, + "awsAccountRegion": { + "target": "com.amazonaws.datazone#AwsRegion", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region in which a specified environment profile is to be\n updated.

" + } + }, + "createdBy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the environment profile.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment profile was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the environment profile was updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "name": { + "target": "com.amazonaws.datazone#EnvironmentProfileName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateEnvironmentProfile\n action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateEnvironmentProfile\n action.

" + } + }, + "environmentBlueprintId": { + "target": "com.amazonaws.datazone#EnvironmentBlueprintId", + "traits": { + "smithy.api#documentation": "

The identifier of the blueprint of the environment profile that is to be updated.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project of the environment profile that is to be updated.

" + } + }, + "userParameters": { + "target": "com.amazonaws.datazone#CustomParameterList", + "traits": { + "smithy.api#documentation": "

The user parameters to be updated as part of the UpdateEnvironmentProfile\n action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateGlossary": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateGlossaryInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateGlossaryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the business glossary in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/glossaries/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateGlossaryInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a business glossary is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateGlossary action.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateGlossary action.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateGlossary action.

" + } + }, + "clientToken": { + "target": "com.amazonaws.datazone#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that is provided to ensure the idempotency of the\n request.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateGlossaryOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a business glossary is to be\n updated.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary that is to be updated.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateGlossary action.

", + "smithy.api#required": {} + } + }, + "owningProjectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which to update a business glossary.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#GlossaryDescription", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateGlossary action.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateGlossary action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateGlossaryTerm": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateGlossaryTermInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateGlossaryTermOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a business glossary term in Amazon DataZone.
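A sketch of revising a term's descriptions and status through the generated client, assuming the DataZone.UpdateGlossaryTermInput shape and that GlossaryTermStatus exposes an .enabled case for the model's "ENABLED" value; IDs are placeholders:

import SotoDataZone

func reviseGlossaryTermExample(dataZone: DataZone) async throws {
    let input = DataZone.UpdateGlossaryTermInput(
        domainIdentifier: "dzd_example",   // placeholder domain ID
        identifier: "gt_example",          // placeholder glossary-term ID
        longDescription: "Revenue recognised after refunds and chargebacks.",
        shortDescription: "Net revenue",
        status: .enabled                   // assumed case name for "ENABLED"
    )
    let output = try await dataZone.updateGlossaryTerm(input)
    print("term \(output.name) in glossary \(output.glossaryId) is \(output.status)")
}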

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/glossary-terms/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateGlossaryTermInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a business glossary term is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "glossaryIdentifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary in which a term is to be updated.

" + } + }, + "identifier": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary term that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateGlossaryTerm action.

" + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description to be updated as part of the UpdateGlossaryTerm\n action.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description to be updated as part of the UpdateGlossaryTerm\n action.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations to be updated as part of the UpdateGlossaryTerm\n action.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateGlossaryTerm action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateGlossaryTermOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#GlossaryTermId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary term that is to be updated.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a business glossary term is to be\n updated.

", + "smithy.api#required": {} + } + }, + "glossaryId": { + "target": "com.amazonaws.datazone#GlossaryId", + "traits": { + "smithy.api#documentation": "

The identifier of the business glossary in which a term is to be updated.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#GlossaryTermName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateGlossaryTerm action.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GlossaryTermStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateGlossaryTerm action.

", + "smithy.api#required": {} + } + }, + "shortDescription": { + "target": "com.amazonaws.datazone#ShortDescription", + "traits": { + "smithy.api#documentation": "

The short description to be updated as part of the UpdateGlossaryTerm\n action.

" + } + }, + "longDescription": { + "target": "com.amazonaws.datazone#LongDescription", + "traits": { + "smithy.api#documentation": "

The long description to be updated as part of the UpdateGlossaryTerm\n action.

" + } + }, + "termRelations": { + "target": "com.amazonaws.datazone#TermRelations", + "traits": { + "smithy.api#documentation": "

The term relations to be updated as part of the UpdateGlossaryTerm\n action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateGroupProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateGroupProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateGroupProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified group profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/group-profiles/{groupIdentifier}" + }, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#UpdateGroupProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a group profile is updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "groupIdentifier": { + "target": "com.amazonaws.datazone#GroupIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the group profile that is updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the group profile that is updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateGroupProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a group profile is updated.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#GroupProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the group profile that is updated.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#GroupProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the group profile that is updated.

" + } + }, + "groupName": { + "target": "com.amazonaws.datazone#GroupProfileName", + "traits": { + "smithy.api#documentation": "

The name of the group profile that is updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateProjectInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateProjectOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified project in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/projects/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateProjectInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a project is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateProject action.

" + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description to be updated as part of the UpdateProject action.

" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms to be updated as part of the UpdateProject\n action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateProjectOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a project is updated.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project that is to be updated.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#ProjectName", + "traits": { + "smithy.api#documentation": "

The name of the project that is to be updated.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.datazone#Description", + "traits": { + "smithy.api#documentation": "

The description of the project that is to be updated.

" + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the project.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was created.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "lastUpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the project was last updated.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "glossaryTerms": { + "target": "com.amazonaws.datazone#GlossaryTerms", + "traits": { + "smithy.api#documentation": "

The glossary terms of the project that are to be updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionGrantStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateSubscriptionGrantStatusInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateSubscriptionGrantStatusOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the status of the specified subscription grant in Amazon DataZone.
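This operation is the one custom fulfillment tooling would call to report per-asset grant progress. A hedged sketch of reporting a failed grant, assuming the generated DataZone.UpdateSubscriptionGrantStatusInput shape, a FailureCause structure with a message member, and a .grantFailed case for the model's "GRANT_FAILED" value; IDs and the message are placeholders:

import SotoDataZone

func reportGrantFailureExample(dataZone: DataZone) async throws {
    // Reports the fulfillment outcome for one asset of a subscription grant.
    let input = DataZone.UpdateSubscriptionGrantStatusInput(
        assetIdentifier: "asset_example",   // placeholder asset ID
        domainIdentifier: "dzd_example",    // placeholder domain ID
        failureCause: .init(message: "Lake Formation permissions could not be applied"), // assumed FailureCause shape
        identifier: "grant_example",        // placeholder subscription grant ID
        status: .grantFailed                // assumed case name for "GRANT_FAILED"
    )
    let output = try await dataZone.updateSubscriptionGrantStatus(input)
    print("grant \(output.id) overall status: \(output.status)")
}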

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/subscription-grants/{identifier}/status/{assetIdentifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionGrantStatusInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription grant status is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription grant whose status is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "assetIdentifier": { + "target": "com.amazonaws.datazone#AssetId", + "traits": { + "smithy.api#documentation": "

The identifier of the asset whose subscription grant status is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateSubscriptionGrantStatus\n action.

", + "smithy.api#required": {} + } + }, + "failureCause": { + "target": "com.amazonaws.datazone#FailureCause", + "traits": { + "smithy.api#documentation": "

Specifies the error message that is returned if the operation cannot be successfully\n completed.

" + } + }, + "targetName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The target name to be updated as part of the UpdateSubscriptionGrantStatus\n action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionGrantStatusOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionGrantId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription grant.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone domain user who created the subscription grant status.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription grant status.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription grant status is to be\n updated.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant status was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription grant status is to be updated.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetId": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription target whose subscription grant status is to be\n updated.

", + "smithy.api#required": {} + } + }, + "grantedEntity": { + "target": "com.amazonaws.datazone#GrantedEntity", + "traits": { + "smithy.api#documentation": "

The granted entity to be updated as part of the\n UpdateSubscriptionGrantStatus action.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionGrantOverallStatus", + "traits": { + "smithy.api#documentation": "

The status to be updated as part of the UpdateSubscriptionGrantStatus\n action.

", + "smithy.api#required": {} + } + }, + "assets": { + "target": "com.amazonaws.datazone#SubscribedAssets", + "traits": { + "smithy.api#documentation": "

" + } + }, + "subscriptionId": { + "target": "com.amazonaws.datazone#SubscriptionId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionRequest": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateSubscriptionRequestInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateSubscriptionRequestOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a specified subscription request in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/subscription-requests/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionRequestInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription request is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the UpdateSubscriptionRequest action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionRequestOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionRequestId", + "traits": { + "smithy.api#documentation": "

The identifier of the subscription request that is to be updated.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription request.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription request.

" + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription request is to be\n updated.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.datazone#SubscriptionRequestStatus", + "traits": { + "smithy.api#documentation": "

The status of the subscription request.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription request was updated.

", + "smithy.api#required": {} + } + }, + "requestReason": { + "target": "com.amazonaws.datazone#RequestReason", + "traits": { + "smithy.api#documentation": "

The reason for the UpdateSubscriptionRequest action.

", + "smithy.api#required": {} + } + }, + "subscribedPrincipals": { + "target": "com.amazonaws.datazone#SubscribedPrincipals", + "traits": { + "smithy.api#documentation": "

The subscribed principals of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "subscribedListings": { + "target": "com.amazonaws.datazone#SubscribedListings", + "traits": { + "smithy.api#documentation": "

The subscribed listings of the subscription request.

", + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "reviewerId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone user who reviews the subscription request.

" + } + }, + "decisionComment": { + "target": "com.amazonaws.datazone#DecisionComment", + "traits": { + "smithy.api#documentation": "

The decision comment of the UpdateSubscriptionRequest action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionTarget": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateSubscriptionTargetInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateSubscriptionTargetOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified subscription target in Amazon DataZone.
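The earlier ThrottlingException shape is marked retryable with HTTP 429, so Soto's default retry policy already backs off before surfacing it; an explicit catch is still useful for logging. A sketch of rotating a target's authorized principals with that handling, assuming the generated DataZone.UpdateSubscriptionTargetInput shape and Soto's usual <Service>ErrorType error matching (here DataZoneErrorType, an assumption); the role ARN and IDs are placeholders:

import SotoDataZone

func rotateTargetPrincipalsExample(dataZone: DataZone) async throws {
    let input = DataZone.UpdateSubscriptionTargetInput(
        authorizedPrincipals: ["arn:aws:iam::111122223333:role/ConsumerRole"], // placeholder role ARN
        domainIdentifier: "dzd_example",        // placeholder domain ID
        environmentIdentifier: "env_example",   // placeholder environment ID
        identifier: "target_example"            // placeholder subscription target ID
    )
    do {
        let output = try await dataZone.updateSubscriptionTarget(input)
        print("target \(output.name) now authorises \(output.authorizedPrincipals.count) principal(s)")
    } catch let error as DataZoneErrorType where error == .throttlingException {
        // Reaching this catch usually means Soto's retries were exhausted.
        print("throttled after retries: \(error.message ?? "")")
    }
}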

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/subscription-targets/{identifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionTargetInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription target is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment in which a subscription target is to be\n updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

Identifier of the subscription target that is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateSubscriptionTarget\n action.

" + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals to be updated as part of the\n UpdateSubscriptionTarget action.

" + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The applicable asset types to be updated as part of the\n UpdateSubscriptionTarget action.

" + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration to be updated as part of the UpdateSubscriptionTarget\n action.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role to be updated as part of the\n UpdateSubscriptionTarget action.

" + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider to be updated as part of the UpdateSubscriptionTarget\n action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateSubscriptionTargetOutput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.datazone#SubscriptionTargetId", + "traits": { + "smithy.api#documentation": "

Identifier of the subscription target that is to be updated.

", + "smithy.api#required": {} + } + }, + "authorizedPrincipals": { + "target": "com.amazonaws.datazone#AuthorizedPrincipalIdentifiers", + "traits": { + "smithy.api#documentation": "

The authorized principals to be updated as part of the\n UpdateSubscriptionTarget action.

", + "smithy.api#required": {} + } + }, + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a subscription target is to be\n updated.

", + "smithy.api#required": {} + } + }, + "projectId": { + "target": "com.amazonaws.datazone#ProjectId", + "traits": { + "smithy.api#documentation": "

The identifier of the project in which a subscription target is to be updated.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The identifier of the environment in which a subscription target is to be\n updated.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.datazone#SubscriptionTargetName", + "traits": { + "smithy.api#documentation": "

The name to be updated as part of the UpdateSubscriptionTarget\n action.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The type to be updated as part of the UpdateSubscriptionTarget\n action.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.datazone#CreatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who created the subscription target.

", + "smithy.api#required": {} + } + }, + "updatedBy": { + "target": "com.amazonaws.datazone#UpdatedBy", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone user who updated the subscription target.

" + } + }, + "createdAt": { + "target": "com.amazonaws.datazone#CreatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when a subscription target was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.datazone#UpdatedAt", + "traits": { + "smithy.api#documentation": "

The timestamp of when the subscription target was updated.

" + } + }, + "manageAccessRole": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The manage access role to be updated as part of the\n UpdateSubscriptionTarget action.

", + "smithy.api#required": {} + } + }, + "applicableAssetTypes": { + "target": "com.amazonaws.datazone#ApplicableAssetTypes", + "traits": { + "smithy.api#documentation": "

The applicable asset types to be updated as part of the\n UpdateSubscriptionTarget action.

", + "smithy.api#required": {} + } + }, + "subscriptionTargetConfig": { + "target": "com.amazonaws.datazone#SubscriptionTargetForms", + "traits": { + "smithy.api#documentation": "

The configuration to be updated as part of the UpdateSubscriptionTarget\n action.

", + "smithy.api#required": {} + } + }, + "provider": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The provider to be updated as part of the UpdateSubscriptionTarget\n action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdateUserProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateUserProfileInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateUserProfileOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified user profile in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/user-profiles/{userIdentifier}" + }, + "smithy.api#tags": [ + "Administration" + ] + } + }, + "com.amazonaws.datazone#UpdateUserProfileInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userIdentifier": { + "target": "com.amazonaws.datazone#UserIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the user whose user profile is to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile that is to be updated.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile that is to be updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateUserProfileOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone domain in which a user profile is updated.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "

The identifier of the user profile.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile.

" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#UpdatedAt": { + "type": "timestamp" + }, + "com.amazonaws.datazone#UpdatedBy": { + "type": "string" + }, + "com.amazonaws.datazone#UserAssignment": { + "type": "enum", + "members": { + "AUTOMATIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTOMATIC" + } + }, + "MANUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MANUAL" + } + } + } + }, + "com.amazonaws.datazone#UserDesignation": { + "type": "enum", + "members": { + "PROJECT_OWNER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_OWNER" + } + }, + "PROJECT_CONTRIBUTOR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROJECT_CONTRIBUTOR" + } + } + } + }, + "com.amazonaws.datazone#UserDetails": { + "type": "structure", + "members": { + "userId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon DataZone user.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The user details of a project member.

" + } + }, + "com.amazonaws.datazone#UserIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "(^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$|^[a-zA-Z_0-9+=,.@-]+$|^arn:aws:iam::\\d{12}:.+$)" + } + }, + "com.amazonaws.datazone#UserProfileDetails": { + "type": "union", + "members": { + "iam": { + "target": "com.amazonaws.datazone#IamUserProfileDetails", + "traits": { + "smithy.api#documentation": "

The IAM details included in the user profile details.

" + } + }, + "sso": { + "target": "com.amazonaws.datazone#SsoUserProfileDetails", + "traits": { + "smithy.api#documentation": "

The single sign-on details included in the user profile details.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the user profile in Amazon DataZone.

" + } + }, + "com.amazonaws.datazone#UserProfileId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + } + }, + "com.amazonaws.datazone#UserProfileName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[a-zA-Z_0-9+=,.@-]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#UserProfileStatus": { + "type": "enum", + "members": { + "ASSIGNED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSIGNED" + } + }, + "NOT_ASSIGNED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_ASSIGNED" + } + }, + "ACTIVATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVATED" + } + }, + "DEACTIVATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEACTIVATED" + } + } + } + }, + "com.amazonaws.datazone#UserProfileSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#UserProfileSummary" + } + }, + "com.amazonaws.datazone#UserProfileSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain of the user profile.

" + } + }, + "id": { + "target": "com.amazonaws.datazone#UserProfileId", + "traits": { + "smithy.api#documentation": "

The ID of the user profile.

" + } + }, + "type": { + "target": "com.amazonaws.datazone#UserProfileType", + "traits": { + "smithy.api#documentation": "

The type of the user profile.

" + } + }, + "status": { + "target": "com.amazonaws.datazone#UserProfileStatus", + "traits": { + "smithy.api#documentation": "

The status of the user profile.

" + } + }, + "details": { + "target": "com.amazonaws.datazone#UserProfileDetails", + "traits": { + "smithy.api#documentation": "

The details of the user profile.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the user profile.

" + } + }, + "com.amazonaws.datazone#UserProfileType": { + "type": "enum", + "members": { + "IAM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM" + } + }, + "SSO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSO" + } + } + } + }, + "com.amazonaws.datazone#UserSearchText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.datazone#UserSearchType": { + "type": "enum", + "members": { + "SSO_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSO_USER" + } + }, + "DATAZONE_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATAZONE_USER" + } + }, + "DATAZONE_SSO_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATAZONE_SSO_USER" + } + }, + "DATAZONE_IAM_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATAZONE_IAM_USER" + } + } + } + }, + "com.amazonaws.datazone#UserType": { + "type": "enum", + "members": { + "IAM_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM_USER" + } + }, + "IAM_ROLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM_ROLE" + } + }, + "SSO_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSO_USER" + } + } + } + }, + "com.amazonaws.datazone#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.datazone#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input fails to satisfy the constraints specified by the Amazon Web Services service.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/ec2.json b/models/ec2.json index 8d9b31253f..13e295e3d9 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -1575,7 +1575,7 @@ "NetworkBorderGroup": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services\n advertises IP addresses. Use this parameter to limit the IP address to this location. IP\n addresses cannot move between network border groups.

\n

Use DescribeAvailabilityZones to view the network border groups.

\n

You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 Classic, \n you receive an InvalidParameterCombination error.

" + "smithy.api#documentation": "

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services\n advertises IP addresses. Use this parameter to limit the IP address to this location. IP\n addresses cannot move between network border groups.

\n

Use DescribeAvailabilityZones to view the network border groups.

" } }, "CustomerOwnedIpv4Pool": { @@ -3254,6 +3254,9 @@ { "target": "com.amazonaws.ec2#DisableFastSnapshotRestores" }, + { + "target": "com.amazonaws.ec2#DisableImage" + }, { "target": "com.amazonaws.ec2#DisableImageBlockPublicAccess" }, @@ -3335,6 +3338,9 @@ { "target": "com.amazonaws.ec2#EnableFastSnapshotRestores" }, + { + "target": "com.amazonaws.ec2#EnableImage" + }, { "target": "com.amazonaws.ec2#EnableImageBlockPublicAccess" }, @@ -6502,7 +6508,7 @@ "target": "com.amazonaws.ec2#AssociateNatGatewayAddressResult" }, "traits": { - "smithy.api#documentation": "

Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, \n see Work with NAT gateways in the Amazon VPC User Guide.

\n

By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide.

" + "smithy.api#documentation": "

Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, \n see Work with NAT gateways in the Amazon VPC User Guide.

\n

By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide.

\n \n

When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. \n

\n
" } }, "com.amazonaws.ec2#AssociateNatGatewayAddressRequest": { @@ -9594,7 +9600,7 @@ "traits": { "aws.protocols#ec2QueryName": "ExportTaskId", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the export task. This is the ID returned by CreateInstanceExportTask.

", + "smithy.api#documentation": "

The ID of the export task. This is the ID returned by the\n CreateInstanceExportTask and ExportImage operations.

", "smithy.api#required": {}, "smithy.api#xmlName": "exportTaskId" } @@ -10731,6 +10737,12 @@ "traits": { "smithy.api#enumValue": "RHEL with HA and SQL Server Enterprise" } + }, + "UBUNTU_PRO_LINUX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Ubuntu Pro" + } } } }, @@ -14698,7 +14710,7 @@ "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "smithy.api#documentation": "

The key-value pair for tagging the EC2 Fleet request on creation. For more information, see \n Tagging your resources.

\n

If the fleet type is instant, specify a resource type of fleet \n to tag the fleet or instance to tag the instances at launch.

\n

If the fleet type is maintain or request, specify a resource\n type of fleet to tag the fleet. You cannot specify a resource type of\n instance. To tag instances at launch, specify the tags in a launch template.

", + "smithy.api#documentation": "

The key-value pair for tagging the EC2 Fleet request on creation. For more information, see \n Tag your resources.

\n

If the fleet type is instant, specify a resource type of fleet \n to tag the fleet or instance to tag the instances at launch.

\n

If the fleet type is maintain or request, specify a resource\n type of fleet to tag the fleet. You cannot specify a resource type of\n instance. To tag instances at launch, specify the tags in a launch template.

", "smithy.api#xmlName": "TagSpecification" } }, @@ -16461,7 +16473,7 @@ "target": "com.amazonaws.ec2#CreateNatGatewayResult" }, "traits": { - "smithy.api#documentation": "

Creates a NAT gateway in the specified subnet. This action creates a network interface\n in the specified subnet with a private IP address from the IP address range of the\n subnet. You can create either a public NAT gateway or a private NAT gateway.

\n

With a public NAT gateway, internet-bound traffic from a private subnet can be routed\n to the NAT gateway, so that instances in a private subnet can connect to the internet.

\n

With a private NAT gateway, private communication is routed across VPCs and on-premises\n networks through a transit gateway or virtual private gateway. Common use cases include\n running large workloads behind a small pool of allowlisted IPv4 addresses, preserving\n private IPv4 addresses, and communicating between overlapping networks.

\n

For more information, see NAT gateways in the Amazon VPC User Guide.

", + "smithy.api#documentation": "

Creates a NAT gateway in the specified subnet. This action creates a network interface\n in the specified subnet with a private IP address from the IP address range of the\n subnet. You can create either a public NAT gateway or a private NAT gateway.

\n

With a public NAT gateway, internet-bound traffic from a private subnet can be routed\n to the NAT gateway, so that instances in a private subnet can connect to the internet.

\n

With a private NAT gateway, private communication is routed across VPCs and on-premises\n networks through a transit gateway or virtual private gateway. Common use cases include\n running large workloads behind a small pool of allowlisted IPv4 addresses, preserving\n private IPv4 addresses, and communicating between overlapping networks.

\n

For more information, see NAT gateways in the Amazon VPC User Guide.

\n \n

When you create a public NAT gateway and assign it an EIP or secondary EIPs, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the NAT gateway will fail to launch. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. \n

\n
", "smithy.api#examples": [ { "title": "To create a NAT gateway", @@ -25695,7 +25707,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

", + "smithy.api#documentation": "

Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on. When you delete the VPC, it deletes the VPC's default security group, network ACL, and route table.

", "smithy.api#examples": [ { "title": "To delete a VPC", @@ -30634,7 +30646,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n architecture - The image architecture (i386 | x86_64 | \n arm64 | x86_64_mac | arm64_mac).

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean value that indicates\n \twhether the Amazon EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in the block device mapping (for\n example, /dev/sdh or xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS\n volume.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-type - The volume type of the Amazon EBS volume\n (io1 | io2 | gp2 | gp3 | sc1\n | st1 | standard).

    \n
  • \n
  • \n

    \n block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted.

    \n
  • \n
  • \n

    \n creation-date - The time when the image was created, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2021-09-29T*, which matches an entire day.

    \n
  • \n
  • \n

    \n description - The description of the image (provided during image\n creation).

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether enhanced networking\n with ENA is enabled.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type (ovm |\n xen).

    \n
  • \n
  • \n

    \n image-id - The ID of the image.

    \n
  • \n
  • \n

    \n image-type - The image type (machine | kernel |\n ramdisk).

    \n
  • \n
  • \n

    \n is-public - A Boolean that indicates whether the image is public.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n manifest-location - The location of the image manifest.

    \n
  • \n
  • \n

    \n name - The name of the AMI (provided during image creation).

    \n
  • \n
  • \n

    \n owner-alias - The owner alias (amazon | aws-marketplace). \n The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.

    \n
  • \n
  • \n

    \n platform - The platform. The only supported value is windows.

    \n
  • \n
  • \n

    \n product-code - The product code.

    \n
  • \n
  • \n

    \n product-code.type - The type of the product code (marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n state - The state of the image (available | pending\n | failed).

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - The message for the state change.

    \n
  • \n
  • \n

    \n sriov-net-support - A value of simple indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type (paravirtual |\n hvm).

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n architecture - The image architecture (i386 | x86_64 | \n arm64 | x86_64_mac | arm64_mac).

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean value that indicates\n \twhether the Amazon EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in the block device mapping (for\n example, /dev/sdh or xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS\n volume.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-type - The volume type of the Amazon EBS volume\n (io1 | io2 | gp2 | gp3 | sc1\n | st1 | standard).

    \n
  • \n
  • \n

    \n block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted.

    \n
  • \n
  • \n

    \n creation-date - The time when the image was created, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2021-09-29T*, which matches an entire day.

    \n
  • \n
  • \n

    \n description - The description of the image (provided during image\n creation).

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether enhanced networking\n with ENA is enabled.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type (ovm |\n xen).

    \n
  • \n
  • \n

    \n image-id - The ID of the image.

    \n
  • \n
  • \n

    \n image-type - The image type (machine | kernel |\n ramdisk).

    \n
  • \n
  • \n

    \n is-public - A Boolean that indicates whether the image is public.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n manifest-location - The location of the image manifest.

    \n
  • \n
  • \n

    \n name - The name of the AMI (provided during image creation).

    \n
  • \n
  • \n

    \n owner-alias - The owner alias (amazon | aws-marketplace). \n The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.

    \n
  • \n
  • \n

    \n platform - The platform. The only supported value is windows.

    \n
  • \n
  • \n

    \n product-code - The product code.

    \n
  • \n
  • \n

    \n product-code.type - The type of the product code (marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n source-instance-id - The ID of the instance that the AMI was created from\n if the AMI was created using CreateImage. This filter is applicable only if the AMI was\n created using CreateImage.

    \n
  • \n
  • \n

    \n state - The state of the image (available | pending\n | failed).

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - The message for the state change.

    \n
  • \n
  • \n

    \n sriov-net-support - A value of simple indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type (paravirtual |\n hvm).

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -30660,6 +30672,14 @@ "smithy.api#documentation": "

Specifies whether to include deprecated AMIs.

\n

Default: No deprecated AMIs are included in the response.

\n \n

If you are the AMI owner, all deprecated AMIs appear in the response regardless of what\n you specify for this parameter.

\n
" } }, + "IncludeDisabled": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "

Specifies whether to include disabled AMIs.

\n

Default: No disabled AMIs are included in the response.

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -31821,7 +31841,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n iam-instance-profile.name - The instance profile associated with\n the instance. Specified as an name.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance or\n a Scheduled Instance (spot | scheduled).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n license-pool -

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available) | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the\n instance.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n iam-instance-profile.name - The instance profile associated with\n the instance. Specified as a name.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance or\n a Scheduled Instance (spot | scheduled).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

  • network-interface.association.customer-owned-ip - The customer-owned IP address.
  • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.
  • network-interface.association.public-dns-name - The public DNS name.
  • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.
  • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.
  • network-interface.attachment.attachment-id - The ID of the interface attachment.
  • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.
  • network-interface.attachment.device-index - The device index to which the network interface is attached.
  • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.
  • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.
  • network-interface.attachment.network-card-index - The index of the network card.
  • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).
  • network-interface.availability-zone - The Availability Zone for the network interface.
  • network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet.
  • network-interface.description - The description of the network interface.
  • network-interface.group-id - The ID of a security group associated with the network interface.
  • network-interface.group-name - The name of a security group associated with the network interface.
  • network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.
  • network-interface.ipv6-address - The IPv6 address associated with the network interface.
  • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.
  • network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address.
  • network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface.
  • network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.
  • network-interface.mac-address - The MAC address of the network interface.
  • network-interface.network-interface-id - The ID of the network interface.
  • network-interface.outpost-arn - The ARN of the Outpost.
  • network-interface.owner-id - The ID of the owner of the network interface.
  • network-interface.private-dns-name - The private DNS name of the network interface.
  • network-interface.private-ip-address - The private IPv4 address.
  • network-interface.public-dns-name - The public DNS name.
  • network-interface.requester-id - The requester ID for the network interface.
  • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.
  • network-interface.status - The status of the network interface (available | in-use).
  • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.
  • network-interface.subnet-id - The ID of the subnet for the network interface.
  • network-interface.tag-key - The key of a tag assigned to the network interface.
  • network-interface.tag-value - The value of a tag assigned to the network interface.
  • network-interface.vpc-id - The ID of the VPC for the network interface.
  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.
  • owner-id - The Amazon Web Services account ID of the instance owner.
  • placement-group-name - The name of the placement group for the instance.
  • placement-partition-number - The partition in which the instance is located.
  • platform - The platform. To list only Windows instances, use windows.
  • platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web).
  • private-dns-name - The private IPv4 DNS name of the instance.
  • private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records.
  • private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.
  • private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name).
  • private-ip-address - The private IPv4 address of the instance.
  • product-code - The product code associated with the AMI used to launch the instance.
  • product-code.type - The type of product code (devpay | marketplace).
  • ramdisk-id - The RAM disk ID.
  • reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter.
  • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).
  • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.
  • root-device-name - The device name of the root device volume (for example, /dev/sda1).
  • root-device-type - The type of the root device volume (ebs | instance-store).
  • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.
  • spot-instance-request-id - The ID of the Spot Instance request.
  • state-reason-code - The reason code for the state change.
  • state-reason-message - A message that describes the state change.
  • subnet-id - The ID of the subnet for the instance.
  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value (see the Swift sketch after this filter list).
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
  • tenancy - The tenancy of an instance (dedicated | default | host).
  • tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0).
  • usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202).
  • usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z.
  • virtualization-type - The virtualization type of the instance (paravirtual | hvm).
  • vpc-id - The ID of the VPC that the instance is running in.
", "smithy.api#xmlName": "Filter" } }, @@ -34652,7 +34672,7 @@ "target": "com.amazonaws.ec2#DescribeNetworkInterfacesResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your network interfaces.

", + "smithy.api#documentation": "

Describes one or more of your network interfaces.

If you have a large number of network interfaces, the operation fails unless you use pagination or one of the following filters: group-id, mac-address, private-dns-name, private-ip-address, subnet-id, or vpc-id.

", "smithy.api#examples": [ { "title": "To describe a network interface", @@ -34768,7 +34788,7 @@ "target": "com.amazonaws.ec2#FilterList", "traits": { "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.
  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.
  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).
  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.
  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.
  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.
  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.
  • association.public-dns-name - The public DNS name for the network interface (IPv4).
  • attachment.attachment-id - The ID of the interface attachment.
  • attachment.attach-time - The time that the network interface was attached to an instance.
  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.
  • attachment.device-index - The device index to which the network interface is attached.
  • attachment.instance-id - The ID of the instance to which the network interface is attached.
  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.
  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).
  • availability-zone - The Availability Zone of the network interface.
  • description - The description of the network interface.
  • group-id - The ID of a security group associated with the network interface.
  • group-name - The name of a security group associated with the network interface.
  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.
  • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | efa | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).
  • mac-address - The MAC address of the network interface.
  • network-interface-id - The ID of the network interface.
  • owner-id - The Amazon Web Services account ID of the network interface owner.
  • private-ip-address - The private IPv4 address or addresses of the network interface.
  • private-dns-name - The private DNS name of the network interface (IPv4).
  • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.
  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).
  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.
  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.
  • subnet-id - The ID of the subnet for the network interface.
  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-id - The ID of the VPC for the network interface.

", + "smithy.api#documentation": "

One or more filters.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.
  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.
  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.
  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).
  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.
  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.
  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.
  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.
  • association.public-dns-name - The public DNS name for the network interface (IPv4).
  • attachment.attach-time - The time that the network interface was attached to an instance.
  • attachment.attachment-id - The ID of the interface attachment.
  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.
  • attachment.device-index - The device index to which the network interface is attached.
  • attachment.instance-id - The ID of the instance to which the network interface is attached.
  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.
  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).
  • availability-zone - The Availability Zone of the network interface.
  • description - The description of the network interface.
  • group-id - The ID of a security group associated with the network interface.
  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.
  • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).
  • mac-address - The MAC address of the network interface.
  • network-interface-id - The ID of the network interface.
  • owner-id - The Amazon Web Services account ID of the network interface owner.
  • private-dns-name - The private DNS name of the network interface (IPv4).
  • private-ip-address - The private IPv4 address or addresses of the network interface.
  • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.
  • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).
  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.
  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.
  • subnet-id - The ID of the subnet for the network interface.
  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-id - The ID of the VPC for the network interface.

", "smithy.api#xmlName": "filter" } }, @@ -37797,7 +37817,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

  • task-state - Returns tasks in a certain state (InProgress | Completed | Failed)
  • bucket - Returns task information for tasks that targeted a specific bucket. For the filter value, specify the bucket name.

", + "smithy.api#documentation": "

The filters.

  • task-state - Returns tasks in a certain state (InProgress | Completed | Failed)
  • bucket - Returns task information for tasks that targeted a specific bucket. For the filter value, specify the bucket name.

When you specify the ImageIds parameter, any filters that you specify are ignored. To use the filters, you must remove the ImageIds parameter.

", "smithy.api#xmlName": "Filter" } }, @@ -37812,7 +37832,7 @@ "traits": { "smithy.api#clientOptional": {}, "smithy.api#default": 0, - "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

You cannot specify this parameter and the ImageIDs parameter in the same call.

" + "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

You cannot specify this parameter and the ImageIds parameter in the same call.

" } } }, @@ -42785,6 +42805,18 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DisableImage": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DisableImageRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DisableImageResult" + }, + "traits": { + "smithy.api#documentation": "

Sets the AMI state to disabled and removes all launch permissions from the AMI. A disabled AMI can't be used for instance launches.

A disabled AMI can't be shared. If a public or shared AMI was previously shared, it is made private. If an AMI was shared with an Amazon Web Services account, organization, or Organizational Unit, they lose access to the disabled AMI.

A disabled AMI does not appear in DescribeImages API calls by default.

Only the AMI owner can disable an AMI.

You can re-enable a disabled AMI using EnableImage.

For more information, see Disable an AMI in the Amazon EC2 User Guide.

" + } + }, "com.amazonaws.ec2#DisableImageBlockPublicAccess": { "type": "operation", "input": { @@ -42883,6 +42915,48 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DisableImageRequest": { + "type": "structure", + "members": { + "ImageId": { + "target": "com.amazonaws.ec2#ImageId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the AMI.

", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DisableImageResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "

Returns true if the request succeeds; otherwise, it returns an error.

", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DisableIpamOrganizationAdminAccount": { "type": "operation", "input": { @@ -46070,6 +46144,18 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#EnableImage": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#EnableImageRequest" + }, + "output": { + "target": "com.amazonaws.ec2#EnableImageResult" + }, + "traits": { + "smithy.api#documentation": "

Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again.

Only the AMI owner can re-enable a disabled AMI.

For more information, see Disable an AMI in the Amazon EC2 User Guide.

" + } + }, "com.amazonaws.ec2#EnableImageBlockPublicAccess": { "type": "operation", "input": { @@ -46184,6 +46270,48 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#EnableImageRequest": { + "type": "structure", + "members": { + "ImageId": { + "target": "com.amazonaws.ec2#ImageId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the AMI.

", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#EnableImageResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "

Returns true if the request succeeds; otherwise, it returns an error.

", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#EnableIpamOrganizationAdminAccount": { "type": "operation", "input": { @@ -48842,7 +48970,7 @@ "target": "com.amazonaws.ec2#DescribeFleetsErrorSet", "traits": { "aws.protocols#ec2QueryName": "ErrorSet", - "smithy.api#documentation": "

Information about the instances that could not be launched by the fleet. Valid only when\n Type is set to instant.

", + "smithy.api#documentation": "

Information about the instances that could not be launched by the fleet. Valid only when\n Type is set to instant.

", "smithy.api#xmlName": "errorSet" } }, @@ -48850,7 +48978,7 @@ "target": "com.amazonaws.ec2#DescribeFleetsInstancesSet", "traits": { "aws.protocols#ec2QueryName": "FleetInstanceSet", - "smithy.api#documentation": "

Information about the instances that were launched by the fleet. Valid only when\n Type is set to instant.

", + "smithy.api#documentation": "

Information about the instances that were launched by the fleet. Valid only when\n Type is set to instant.

", "smithy.api#xmlName": "fleetInstanceSet" } }, @@ -55038,7 +55166,7 @@ "target": "com.amazonaws.ec2#HypervisorType", "traits": { "aws.protocols#ec2QueryName": "Hypervisor", - "smithy.api#documentation": "

The hypervisor type of the image.

", + "smithy.api#documentation": "

The hypervisor type of the image. Only xen is supported. ovm is\n not supported.

", "smithy.api#xmlName": "hypervisor" } }, @@ -55137,6 +55265,14 @@ "smithy.api#documentation": "

If v2.0, it indicates that IMDSv2 is specified in the AMI. Instances launched\n from this AMI will have HttpTokens automatically set to required so\n that, by default, the instance requires that IMDSv2 is used when requesting instance metadata.\n In addition, HttpPutResponseHopLimit is set to 2. For more\n information, see Configure\n the AMI in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "imdsSupport" } + }, + "SourceInstanceId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SourceInstanceId", + "smithy.api#documentation": "

The ID of the instance that the AMI was created from if the AMI was created using CreateImage. This field only appears if the AMI was created using\n CreateImage.

", + "smithy.api#xmlName": "sourceInstanceId" + } } }, "traits": { @@ -55538,6 +55674,12 @@ "traits": { "smithy.api#enumValue": "error" } + }, + "disabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "disabled" + } } } }, @@ -55799,7 +55941,7 @@ "BootMode": { "target": "com.amazonaws.ec2#BootModeValues", "traits": { - "smithy.api#documentation": "

The boot mode of the virtual machine.

" + "smithy.api#documentation": "

The boot mode of the virtual machine.

The uefi-preferred boot mode isn't supported for importing images. For more information, see Boot modes in the VM Import/Export User Guide.

" } } }, @@ -70065,7 +70207,7 @@ "ResourceType": { "target": "com.amazonaws.ec2#ResourceType", "traits": { - "smithy.api#documentation": "

The type of resource to tag.

The Valid Values are all the resource types that can be tagged. However, when creating a launch template, you can specify tags for the following resource types only: instance | volume | elastic-gpu | network-interface | spot-instances-request

To tag a resource after it has been created, see CreateTags.

" + "smithy.api#documentation": "

The type of resource to tag.

Valid Values lists all resource types for Amazon EC2 that can be tagged. When you create a launch template, you can specify tags for the following resource types only: instance | volume | elastic-gpu | network-interface | spot-instances-request. If the instance does not include the resource type that you specify, the instance launch fails. For example, not all instance types include an Elastic GPU.

To tag a resource after it has been created, see CreateTags.

" } }, "Tags": { @@ -80109,7 +80251,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "MaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

", + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", "smithy.api#xmlName": "maxTotalPrice" } } @@ -80160,7 +80302,7 @@ "MaxTotalPrice": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

" + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

" } } }, @@ -84303,7 +84445,7 @@ "NetworkBorderGroup": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses.

If you provide an incorrect network border group, you receive an InvalidAddress.NotFound error.

You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 classic, you receive an InvalidParameterCombination error.

" + "smithy.api#documentation": "

The set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses.

If you provide an incorrect network border group, you receive an InvalidAddress.NotFound error.

" } }, "DryRun": { @@ -85665,7 +85807,7 @@ "ElasticInferenceAccelerators": { "target": "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorList", "traits": { - "smithy.api#documentation": "

The elastic inference accelerator for the instance.

", + "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.

You cannot specify accelerators from different generations in the same request.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

", "smithy.api#xmlName": "ElasticInferenceAccelerator" } }, @@ -88295,7 +88437,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorResponseList", "traits": { "aws.protocols#ec2QueryName": "ElasticInferenceAcceleratorSet", - "smithy.api#documentation": "

The elastic inference accelerator for the instance.

", + "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.

You cannot specify accelerators from different generations in the same request.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

", "smithy.api#xmlName": "elasticInferenceAcceleratorSet" } }, @@ -93501,7 +93643,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "OnDemandMaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay. You\n can use the onDemandMaxTotalPrice parameter, the\n spotMaxTotalPrice parameter, or both parameters to ensure that your\n fleet cost does not exceed your budget. If you set a maximum price per hour for the\n On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the\n maximum amount you're willing to pay. When the maximum amount you're willing to pay is\n reached, the fleet stops launching instances even if it hasn’t met the target\n capacity.

", + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", "smithy.api#xmlName": "onDemandMaxTotalPrice" } }, @@ -93509,7 +93651,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "SpotMaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. You can use\n the spotdMaxTotalPrice parameter, the onDemandMaxTotalPrice\n parameter, or both parameters to ensure that your fleet cost does not exceed your\n budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request,\n Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay.\n When the maximum amount you're willing to pay is reached, the fleet stops launching\n instances even if it hasn’t met the target capacity.

", + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", "smithy.api#xmlName": "spotMaxTotalPrice" } }, @@ -93602,7 +93744,7 @@ "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "smithy.api#documentation": "

The key-value pair for tagging the Spot Fleet request on creation. The value for\n ResourceType must be spot-fleet-request, otherwise the\n Spot Fleet request fails. To tag instances at launch, specify the tags in the launch\n template (valid only if you use LaunchTemplateConfigs) or in\n the \n SpotFleetTagSpecification\n (valid only if you use\n LaunchSpecifications). For information about tagging after launch, see\n Tagging Your Resources.

", + "smithy.api#documentation": "

The key-value pair for tagging the Spot Fleet request on creation. The value for\n ResourceType must be spot-fleet-request, otherwise the\n Spot Fleet request fails. To tag instances at launch, specify the tags in the launch\n template (valid only if you use LaunchTemplateConfigs) or in\n the \n SpotFleetTagSpecification\n (valid only if you use\n LaunchSpecifications). For information about tagging after launch, see\n Tag your resources.

", "smithy.api#xmlName": "TagSpecification" } } @@ -94112,7 +94254,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "MaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

", + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

", "smithy.api#xmlName": "maxTotalPrice" } } @@ -94177,7 +94319,7 @@ "MaxTotalPrice": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

" + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide.

" } } }, diff --git a/models/elastic-load-balancing-v2.json b/models/elastic-load-balancing-v2.json index a8bf82bb0b..1ac55643aa 100644 --- a/models/elastic-load-balancing-v2.json +++ b/models/elastic-load-balancing-v2.json @@ -4732,7 +4732,7 @@ "Key": { "target": "com.amazonaws.elasticloadbalancingv2#LoadBalancerAttributeKey", "traits": { - "smithy.api#documentation": "

The name of the attribute.

The following attributes are supported by all load balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.
  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.
  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.
  • ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.
  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.
  • routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false.
  • routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false.
  • routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false.
  • routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append.
      • If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets.
      • If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change.
      • If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets.
  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
  • waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false.

" + "smithy.api#documentation": "

The name of the attribute.

The following attributes are supported by all load balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.
  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.
  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.
  • ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.
  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.
  • routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false.
  • routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false.
  • routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false.
  • routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append.
      • If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets.
      • If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change.
      • If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets.
  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
  • waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false.

The following attributes are supported by only Network Load Balancers:

  • dns_record.client_routing_policy - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity (see the sketch after this list).

" } }, "Value": { @@ -6487,7 +6487,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer or Network Load Balancer. The specified subnets replace the previously enabled subnets.

When you specify subnets for a Network Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets.

", + "smithy.api#documentation": "

Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer. The specified subnets replace the previously enabled subnets.

When you specify subnets for a Network Load Balancer or Gateway Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets.

", "smithy.api#examples": [ { "title": "To enable Availability Zones for a load balancer", @@ -6528,19 +6528,19 @@ "Subnets": { "target": "com.amazonaws.elasticloadbalancingv2#Subnets", "traits": { - "smithy.api#documentation": "

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

" + "smithy.api#documentation": "

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" } }, "SubnetMappings": { "target": "com.amazonaws.elasticloadbalancingv2#SubnetMappings", "traits": { - "smithy.api#documentation": "

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

" + "smithy.api#documentation": "

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" } }, "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

[Network Load Balancers] The type of IP addresses used by the subnets for your load\n balancer. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses). You can’t specify\n dualstack for a load balancer with a UDP or TCP_UDP listener.

" + "smithy.api#documentation": "

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).
" } } }, @@ -6560,7 +6560,7 @@ "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

[Network Load Balancers] The IP address type.

" + "smithy.api#documentation": "

[Network Load Balancers] The IP address type. [Gateway Load Balancers] The IP address type.
" } } }, diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 1dd952e582..94f3cc15ef 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2422,7 +2422,14 @@ }, "bedrock" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-southeast-1" : { }, + "bedrock-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "bedrock.ap-northeast-1.amazonaws.com" + }, "bedrock-ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" @@ -2441,6 +2448,12 @@ }, "hostname" : "bedrock-fips.us-west-2.amazonaws.com" }, + "bedrock-runtime-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "bedrock-runtime.ap-northeast-1.amazonaws.com" + }, "bedrock-runtime-ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" @@ -4573,6 +4586,118 @@ } } }, + "datazone" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "hostname" : "datazone.af-south-1.api.aws" + }, + "ap-east-1" : { + "hostname" : "datazone.ap-east-1.api.aws" + }, + "ap-northeast-1" : { + "hostname" : "datazone.ap-northeast-1.api.aws" + }, + "ap-northeast-2" : { + "hostname" : "datazone.ap-northeast-2.api.aws" + }, + "ap-northeast-3" : { + "hostname" : "datazone.ap-northeast-3.api.aws" + }, + "ap-south-1" : { + "hostname" : "datazone.ap-south-1.api.aws" + }, + "ap-south-2" : { + "hostname" : "datazone.ap-south-2.api.aws" + }, + "ap-southeast-1" : { + "hostname" : "datazone.ap-southeast-1.api.aws" + }, + "ap-southeast-2" : { + "hostname" : "datazone.ap-southeast-2.api.aws" + }, + "ap-southeast-3" : { + "hostname" : "datazone.ap-southeast-3.api.aws" + }, + "ap-southeast-4" : { + "hostname" : "datazone.ap-southeast-4.api.aws" + }, + "ca-central-1" : { + "hostname" : "datazone.ca-central-1.api.aws", + "variants" : [ { + "hostname" : "datazone-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "hostname" : "datazone.eu-central-1.api.aws" + }, + "eu-central-2" : { + "hostname" : "datazone.eu-central-2.api.aws" + }, + "eu-north-1" : { + "hostname" : "datazone.eu-north-1.api.aws" + }, + "eu-south-1" : { + "hostname" : "datazone.eu-south-1.api.aws" + }, + "eu-south-2" : { + "hostname" : "datazone.eu-south-2.api.aws" + }, + "eu-west-1" : { + "hostname" : "datazone.eu-west-1.api.aws" + }, + "eu-west-2" : { + "hostname" : "datazone.eu-west-2.api.aws" + }, + "eu-west-3" : { + "hostname" : "datazone.eu-west-3.api.aws" + }, + "il-central-1" : { + "hostname" : "datazone.il-central-1.api.aws" + }, + "me-central-1" : { + "hostname" : "datazone.me-central-1.api.aws" + }, + "me-south-1" : { + "hostname" : "datazone.me-south-1.api.aws" + }, + "sa-east-1" : { + "hostname" : "datazone.sa-east-1.api.aws" + }, + "us-east-1" : { + "hostname" : "datazone.us-east-1.api.aws", + "variants" : [ { + "hostname" : "datazone-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "hostname" : "datazone.us-east-2.api.aws", + "variants" : [ { + "hostname" : "datazone-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "hostname" : "datazone.us-west-1.api.aws" + }, + "us-west-2" : { + "hostname" : "datazone.us-west-2.api.aws", + "variants" : [ { + "hostname" : "datazone-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "dax" : { "endpoints" : { 
"ap-northeast-1" : { }, @@ -10228,11 +10353,14 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-west-1" : { }, @@ -10267,6 +10395,7 @@ "hostname" : "license-manager-fips.us-west-2.amazonaws.com" }, "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -18973,6 +19102,24 @@ "cn-northwest-1" : { } } }, + "datazone" : { + "defaults" : { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "variants" : [ { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "hostname" : "datazone.cn-north-1.api.amazonwebservices.com.cn" + }, + "cn-northwest-1" : { + "hostname" : "datazone.cn-northwest-1.api.amazonwebservices.com.cn" + } + } + }, "dax" : { "endpoints" : { "cn-north-1" : { }, @@ -21160,6 +21307,24 @@ } } }, + "datazone" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "hostname" : "datazone.us-gov-east-1.api.aws" + }, + "us-gov-west-1" : { + "hostname" : "datazone.us-gov-west-1.api.aws" + } + } + }, "directconnect" : { "endpoints" : { "us-gov-east-1" : { @@ -22630,6 +22795,26 @@ } } }, + "m2" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "deprecated" : true + }, + "fips-us-gov-west-1" : { + "deprecated" : true + }, + "us-gov-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, "managedblockchain" : { "endpoints" : { "us-gov-west-1" : { } @@ -23751,8 +23936,18 @@ }, "simspaceweaver" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "simspaceweaver.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "simspaceweaver.us-gov-west-1.amazonaws.com" + } } }, "sms" : { diff --git a/models/fsx.json b/models/fsx.json index 62364beec1..7dbb7fc75a 100644 --- a/models/fsx.json +++ b/models/fsx.json @@ -143,6 +143,9 @@ { "target": "com.amazonaws.fsx#RestoreVolumeFromSnapshot" }, + { + "target": "com.amazonaws.fsx#StartMisconfiguredStateRecovery" + }, { "target": "com.amazonaws.fsx#TagResource" }, @@ -1390,6 +1393,12 @@ "traits": { "smithy.api#enumValue": "STORAGE_TYPE_OPTIMIZATION" } + }, + "MISCONFIGURED_STATE_RECOVERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISCONFIGURED_STATE_RECOVERY" + } } }, "traits": { @@ -3004,7 +3013,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Lustre configuration for the file system being created. The following parameters are not supported for file systems with a data repository association created with .
AutoImportPolicy
ExportPath
ImportedChunkSize
ImportPath
" + "smithy.api#documentation": "

The Lustre configuration for the file system being created. The following parameters are not supported for file systems with a data repository association created with .
AutoImportPolicy
ExportPath
ImportedFileChunkSize
ImportPath
" } }, "com.amazonaws.fsx#CreateFileSystemOntapConfiguration": { @@ -9677,6 +9686,60 @@ "smithy.api#error": "client" } }, + "com.amazonaws.fsx#StartMisconfiguredStateRecovery": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#StartMisconfiguredStateRecoveryRequest" + }, + "output": { + "target": "com.amazonaws.fsx#StartMisconfiguredStateRecoveryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#FileSystemNotFound" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + } + ], + "traits": { + "smithy.api#documentation": "

After performing steps to repair the Active Directory configuration of an FSx for Windows File Server file system, use this action to \n initiate the process of Amazon FSx attempting to reconnect to the file system.

" + } + }, + "com.amazonaws.fsx#StartMisconfiguredStateRecoveryRequest": { + "type": "structure", + "members": { + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", + "traits": { + "smithy.api#idempotencyToken": {} + } + }, + "FileSystemId": { + "target": "com.amazonaws.fsx#FileSystemId", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.fsx#StartMisconfiguredStateRecoveryResponse": { + "type": "structure", + "members": { + "FileSystem": { + "target": "com.amazonaws.fsx#FileSystem" + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.fsx#StartTime": { "type": "timestamp" }, diff --git a/models/glue.json b/models/glue.json index bc42a27d0a..daa5cdd29e 100644 --- a/models/glue.json +++ b/models/glue.json @@ -29324,6 +29324,18 @@ "smithy.api#enumValue": "GITHUB" } }, + "GITLAB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GITLAB" + } + }, + "BITBUCKET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BITBUCKET" + } + }, "AWS_CODE_COMMIT": { "target": "smithy.api#Unit", "traits": { @@ -33614,13 +33626,13 @@ "Provider": { "target": "com.amazonaws.glue#SourceControlProvider", "traits": { - "smithy.api#documentation": "

The provider for the remote repository.

" + "smithy.api#documentation": "

\n The provider for the remote repository. Possible values: GITHUB, AWS_CODE_COMMIT, GITLAB, BITBUCKET.\n

" } }, "RepositoryName": { "target": "com.amazonaws.glue#NameString", "traits": { - "smithy.api#documentation": "

The name of the remote repository that contains the job artifacts.

" + "smithy.api#documentation": "

The name of the remote repository that contains the job artifacts. For BitBucket providers, RepositoryName should include WorkspaceName. Use the format WorkspaceName/RepositoryName.

" } }, "RepositoryOwner": { @@ -34137,13 +34149,13 @@ "Provider": { "target": "com.amazonaws.glue#SourceControlProvider", "traits": { - "smithy.api#documentation": "

The provider for the remote repository.

" + "smithy.api#documentation": "

\n The provider for the remote repository. Possible values: GITHUB, AWS_CODE_COMMIT, GITLAB, BITBUCKET.\n

" } }, "RepositoryName": { "target": "com.amazonaws.glue#NameString", "traits": { - "smithy.api#documentation": "

The name of the remote repository that contains the job artifacts.

" + "smithy.api#documentation": "

The name of the remote repository that contains the job artifacts. For BitBucket providers, RepositoryName should include WorkspaceName. Use the format WorkspaceName/RepositoryName.

" } }, "RepositoryOwner": { diff --git a/models/inspector2.json b/models/inspector2.json index 7e294dda22..1517804f47 100644 --- a/models/inspector2.json +++ b/models/inspector2.json @@ -1985,7 +1985,7 @@ "scanStatusCode": { "target": "com.amazonaws.inspector2#CoverageStringFilterList", "traits": { - "smithy.api#documentation": "

The scan status code to filter on.

" + "smithy.api#documentation": "

The scan status code to filter on. Valid values are: ValidationException, InternalServerException, ResourceNotFoundException, BadRequestException, and ThrottlingException.

" } }, "scanStatusReason": { @@ -2286,7 +2286,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a filter resource using specified filter criteria.

", + "smithy.api#documentation": "

Creates a filter resource using specified filter criteria. When the filter action is set to SUPPRESS this action creates a suppression rule.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3369,6 +3369,10 @@ { "value": "UNKNOWN", "name": "UNKNOWN" + }, + { + "value": "MACOS", + "name": "MACOS" } ] } @@ -4441,7 +4445,7 @@ "type": { "target": "com.amazonaws.inspector2#FindingType", "traits": { - "smithy.api#documentation": "

The type of the finding.

", + "smithy.api#documentation": "

The type of the finding. The type value determines the valid values for resource in your request. For more information, see Finding types in the Amazon Inspector user guide.

", "smithy.api#required": {} } }, @@ -4468,7 +4472,7 @@ "severity": { "target": "com.amazonaws.inspector2#Severity", "traits": { - "smithy.api#documentation": "

The severity of the finding.

", + "smithy.api#documentation": "

The severity of the finding. UNTRIAGED applies to PACKAGE_VULNERABILITY type findings that the vendor has not assigned a severity yet. For more information, see Severity levels for findings in the Amazon Inspector user guide.

", "smithy.api#required": {} } }, @@ -4502,7 +4506,7 @@ "resources": { "target": "com.amazonaws.inspector2#ResourceList", "traits": { - "smithy.api#documentation": "

Contains information on the resources involved in a finding.

", + "smithy.api#documentation": "

Contains information on the resources involved in a finding. The resource value determines the valid values for type in your request. For more information, see Finding types in the Amazon Inspector user guide.

", "smithy.api#required": {} } }, @@ -5825,7 +5829,7 @@ "*" ] }, - "smithy.api#documentation": "

Amazon Inspector is a vulnerability discovery service that automates continuous scanning for\n security vulnerabilities within your Amazon EC2 and Amazon ECR environments.

", + "smithy.api#documentation": "

Amazon Inspector is a vulnerability discovery service that automates continuous scanning for\n security vulnerabilities within your Amazon EC2, Amazon ECR, and Amazon Web Services Lambda environments.

", "smithy.api#title": "Inspector2", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -7122,13 +7126,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListAccountPermissionsMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

" } } } @@ -7200,13 +7204,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListCoverageMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } }, "filterCriteria": { @@ -7357,13 +7361,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListDelegatedAdminMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } } } @@ -7458,13 +7462,13 @@ "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } }, "maxResults": { "target": "com.amazonaws.inspector2#ListFilterMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } } } @@ -7543,13 +7547,13 @@ "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } }, "maxResults": { "target": "com.amazonaws.inspector2#ListFindingAggregationsMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "accountIds": { @@ -7639,13 +7643,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListFindingsMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } }, "filterCriteria": { @@ -7737,13 +7741,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListMembersMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#NextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } } } @@ -7880,13 +7884,13 @@ "maxResults": { "target": "com.amazonaws.inspector2#ListUsageTotalsMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response.

" + "smithy.api#documentation": "

The maximum number of results the response can return. If your request would return more than the maximum the response will return a nextToken value, use this value when you call the action again to get the remaining results.

" } }, "nextToken": { "target": "com.amazonaws.inspector2#ListUsageTotalsNextToken", "traits": { - "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

" + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. If your response returns more than the maxResults maximum value it will also return a nextToken value. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

" } }, "accountIds": { @@ -9557,7 +9561,7 @@ "reason": { "target": "com.amazonaws.inspector2#ScanStatusReason", "traits": { - "smithy.api#documentation": "

The reason for the scan.

", + "smithy.api#documentation": "

The scan status. Possible return values and descriptions are:
PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon.
ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy.
INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user.
UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance.
UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.
SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image.
RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up.
SUCCESSFUL - The scan was successful.
NO_RESOURCES_FOUND - Reserved for future use.
IMAGE_SIZE_EXCEEDED - Reserved for future use.
SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration.
SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not generate new findings because of the scan frequency configuration.
EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it’s in a stopped state.
PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status.
NO INVENTORY - Amazon Inspector couldn’t find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance’s inventory in the SSM Fleet Manager console.
STALE_INVENTORY - Amazon Inspector wasn’t able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console.
EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag.
UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.
UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type.
UNSUPPORTED_CONFIG_FILE - Reserved for future use.
DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account.
DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance.
DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeded the maximum threshold of 15 minutes.
DEEP_INSPECTION_NO_INVENTORY - The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance. This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance.
", "smithy.api#required": {} } } @@ -11260,7 +11264,7 @@ "source": { "target": "com.amazonaws.inspector2#VulnerabilitySource", "traits": { - "smithy.api#documentation": "

The source of the vulnerability information.

" + "smithy.api#documentation": "

The source of the vulnerability information. Possible results are RHEL, AMAZON_CVE, DEBIAN or NVD.

" } }, "description": { diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index f0865661fe..aa101f8962 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -1846,6 +1846,42 @@ "smithy.api#default": false, "smithy.api#documentation": "

Whether the participant ever published to the stage session.

" } + }, + "ispName": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s Internet Service Provider.

" + } + }, + "osName": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s operating system.

" + } + }, + "osVersion": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s operating system version.

" + } + }, + "browserName": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s browser.

" + } + }, + "browserVersion": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s browser version.

" + } + }, + "sdkVersion": { + "target": "com.amazonaws.ivsrealtime#ParticipantClientAttribute", + "traits": { + "smithy.api#documentation": "

The participant’s SDK version.

" + } } }, "traits": { @@ -1861,6 +1897,16 @@ "target": "com.amazonaws.ivsrealtime#String" } }, + "com.amazonaws.ivsrealtime#ParticipantClientAttribute": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_.,:;\\s]*$" + } + }, "com.amazonaws.ivsrealtime#ParticipantId": { "type": "string" }, diff --git a/models/lambda.json b/models/lambda.json index 9f2a524dd9..5fef5ae703 100644 --- a/models/lambda.json +++ b/models/lambda.json @@ -8546,6 +8546,9 @@ } } }, + "com.amazonaws.lambda#NullableBoolean": { + "type": "boolean" + }, "com.amazonaws.lambda#OnFailure": { "type": "structure", "members": { @@ -11735,6 +11738,12 @@ "traits": { "smithy.api#documentation": "

A list of VPC security group IDs.

" } + }, + "Ipv6AllowedForDualStack": { + "target": "com.amazonaws.lambda#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" + } } }, "traits": { @@ -11761,6 +11770,12 @@ "traits": { "smithy.api#documentation": "

The ID of the VPC.

" } + }, + "Ipv6AllowedForDualStack": { + "target": "com.amazonaws.lambda#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" + } } }, "traits": { diff --git a/models/location.json b/models/location.json index 911158f4ad..a2bc0559a9 100644 --- a/models/location.json +++ b/models/location.json @@ -179,7 +179,7 @@ "traits": { "smithy.api#documentation": "

Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection. You can associate up to five geofence collections to each tracker resource. Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account.
", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}/consumers", @@ -1533,7 +1533,7 @@ "traits": { "smithy.api#documentation": "

Creates a geofence collection, which manages and stores geofences.

", "smithy.api#endpoint": { - "hostPrefix": "geofencing." + "hostPrefix": "cp.geofencing." }, "smithy.api#http": { "uri": "/geofencing/v0/collections", @@ -1649,7 +1649,7 @@ "traits": { "smithy.api#documentation": "

Creates an API key resource in your Amazon Web Services account, which lets you grant\n actions for Amazon Location resources to the API key bearer.

\n \n

For more information, see Using API keys.

\n
", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "uri": "/metadata/v0/keys", @@ -1765,7 +1765,7 @@ "traits": { "smithy.api#documentation": "

Creates a map resource in your Amazon Web Services account, which provides map tiles of different\n styles sourced from global location data providers.

\n \n

If your application is tracking or routing assets you use in your business, such \n as delivery vehicles or employees, you must not use Esri as your geolocation \n provider. See section 82 of the Amazon Web Services\n service terms for more details.

\n
", "smithy.api#endpoint": { - "hostPrefix": "maps." + "hostPrefix": "cp.maps." }, "smithy.api#http": { "uri": "/maps/v0/maps", @@ -1872,7 +1872,7 @@ "traits": { "smithy.api#documentation": "

Creates a place index resource in your Amazon Web Services account. Use a place index resource to\n geocode addresses and other text queries by using the\n SearchPlaceIndexForText operation, and reverse geocode coordinates by\n using the SearchPlaceIndexForPosition operation, and enable autosuggestions\n by using the SearchPlaceIndexForSuggestions operation.

\n \n

If your application is tracking or routing assets you use in your business, such \n as delivery vehicles or employees, you must not use Esri as your geolocation \n provider. See section 82 of the Amazon Web Services\n service terms for more details.

\n
", "smithy.api#endpoint": { - "hostPrefix": "places." + "hostPrefix": "cp.places." }, "smithy.api#http": { "uri": "/places/v0/indexes", @@ -1985,7 +1985,7 @@ "traits": { "smithy.api#documentation": "

Creates a route calculator resource in your Amazon Web Services account. You can send requests to a route calculator resource to estimate travel time, distance, and get directions. A route calculator sources traffic and road network data from your chosen data provider. If your application is tracking or routing assets you use in your business, such as delivery vehicles or employees, you must not use Esri as your geolocation provider. See section 82 of the Amazon Web Services service terms for more details.
", "smithy.api#endpoint": { - "hostPrefix": "routes." + "hostPrefix": "cp.routes." }, "smithy.api#http": { "uri": "/routes/v0/calculators", @@ -2092,7 +2092,7 @@ "traits": { "smithy.api#documentation": "

Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and\n historical location of devices.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers", @@ -2237,7 +2237,7 @@ "traits": { "smithy.api#documentation": "

Deletes a geofence collection from your Amazon Web Services account.

\n \n

This operation deletes the resource permanently. If the geofence collection is the\n target of a tracker resource, the devices will no longer be monitored.

\n
", "smithy.api#endpoint": { - "hostPrefix": "geofencing." + "hostPrefix": "cp.geofencing." }, "smithy.api#http": { "uri": "/geofencing/v0/collections/{CollectionName}", @@ -2291,7 +2291,7 @@ "traits": { "smithy.api#documentation": "

Deletes the specified API key. The API key must have been deactivated more than\n 90 days previously.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "uri": "/metadata/v0/keys/{KeyName}", @@ -2345,7 +2345,7 @@ "traits": { "smithy.api#documentation": "

Deletes a map resource from your Amazon Web Services account.

\n \n

This operation deletes the resource permanently. If the map is being used in an application,\n the map may not render.

\n
", "smithy.api#endpoint": { - "hostPrefix": "maps." + "hostPrefix": "cp.maps." }, "smithy.api#http": { "uri": "/maps/v0/maps/{MapName}", @@ -2400,7 +2400,7 @@ "traits": { "smithy.api#documentation": "

Deletes a place index resource from your Amazon Web Services account.

\n \n

This operation deletes the resource permanently.

\n
", "smithy.api#endpoint": { - "hostPrefix": "places." + "hostPrefix": "cp.places." }, "smithy.api#http": { "uri": "/places/v0/indexes/{IndexName}", @@ -2455,7 +2455,7 @@ "traits": { "smithy.api#documentation": "

Deletes a route calculator resource from your Amazon Web Services account.

\n \n

This operation deletes the resource permanently.

\n
", "smithy.api#endpoint": { - "hostPrefix": "routes." + "hostPrefix": "cp.routes." }, "smithy.api#http": { "uri": "/routes/v0/calculators/{CalculatorName}", @@ -2510,7 +2510,7 @@ "traits": { "smithy.api#documentation": "

Deletes a tracker resource from your Amazon Web Services account.

\n \n

This operation deletes the resource permanently. If the tracker resource is in use, you may\n encounter an error. Make sure that the target resource isn't a dependency for your\n applications.

\n
", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}", @@ -2564,7 +2564,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the geofence collection details.

", "smithy.api#endpoint": { - "hostPrefix": "geofencing." + "hostPrefix": "cp.geofencing." }, "smithy.api#http": { "uri": "/geofencing/v0/collections/{CollectionName}", @@ -2695,7 +2695,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the API key resource details.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "uri": "/metadata/v0/keys/{KeyName}", @@ -2810,7 +2810,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the map resource details.

", "smithy.api#endpoint": { - "hostPrefix": "maps." + "hostPrefix": "cp.maps." }, "smithy.api#http": { "uri": "/maps/v0/maps/{MapName}", @@ -2930,7 +2930,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the place index resource details.

", "smithy.api#endpoint": { - "hostPrefix": "places." + "hostPrefix": "cp.places." }, "smithy.api#http": { "uri": "/places/v0/indexes/{IndexName}", @@ -3050,7 +3050,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the route calculator resource details.

", "smithy.api#endpoint": { - "hostPrefix": "routes." + "hostPrefix": "cp.routes." }, "smithy.api#http": { "uri": "/routes/v0/calculators/{CalculatorName}", @@ -3163,7 +3163,7 @@ "traits": { "smithy.api#documentation": "

Retrieves the tracker resource details.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}", @@ -3422,7 +3422,7 @@ "traits": { "smithy.api#documentation": "

Removes the association between a tracker resource and a geofence collection.

\n \n

Once you unlink a tracker resource from a geofence collection, the tracker\n positions will no longer be automatically evaluated against geofences.

\n
", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}", @@ -4727,7 +4727,7 @@ "traits": { "smithy.api#documentation": "

Lists geofence collections in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "geofencing." + "hostPrefix": "cp.geofencing." }, "smithy.api#http": { "uri": "/geofencing/v0/list-collections", @@ -5013,7 +5013,7 @@ "traits": { "smithy.api#documentation": "

Lists API key resources in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "uri": "/metadata/v0/list-keys", @@ -5152,7 +5152,7 @@ "traits": { "smithy.api#documentation": "

Lists map resources in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "maps." + "hostPrefix": "cp.maps." }, "smithy.api#http": { "uri": "/maps/v0/list-maps", @@ -5290,7 +5290,7 @@ "traits": { "smithy.api#documentation": "

Lists place index resources in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "places." + "hostPrefix": "cp.places." }, "smithy.api#http": { "uri": "/places/v0/list-indexes", @@ -5428,7 +5428,7 @@ "traits": { "smithy.api#documentation": "

Lists route calculator resources in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "routes." + "hostPrefix": "cp.routes." }, "smithy.api#http": { "uri": "/routes/v0/list-calculators", @@ -5569,7 +5569,7 @@ "traits": { "smithy.api#documentation": "

Returns a list of tags that are applied to the specified Amazon Location resource.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "method": "GET", @@ -5630,7 +5630,7 @@ "traits": { "smithy.api#documentation": "

Lists geofence collections currently associated to the given tracker resource.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}/list-consumers", @@ -5717,7 +5717,7 @@ "traits": { "smithy.api#documentation": "

Lists tracker resources in your Amazon Web Services account.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/list-trackers", @@ -7973,7 +7973,7 @@ "traits": { "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with an Amazon Location Service resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the tags already associated with the resource. If you specify a tag key that's already associated with the resource, the new tag value that you specify replaces the previous value for that tag. You can associate up to 50 tags with a resource.
", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "method": "POST", @@ -8258,7 +8258,7 @@ "traits": { "smithy.api#documentation": "

Removes one or more tags from the specified Amazon Location resource.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "method": "DELETE", @@ -8320,7 +8320,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties of a given geofence collection.

", "smithy.api#endpoint": { - "hostPrefix": "geofencing." + "hostPrefix": "cp.geofencing." }, "smithy.api#http": { "uri": "/geofencing/v0/collections/{CollectionName}", @@ -8422,7 +8422,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties of a given API key resource.

", "smithy.api#endpoint": { - "hostPrefix": "metadata." + "hostPrefix": "cp.metadata." }, "smithy.api#http": { "uri": "/metadata/v0/keys/{KeyName}", @@ -8528,7 +8528,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties of a given map resource.

", "smithy.api#endpoint": { - "hostPrefix": "maps." + "hostPrefix": "cp.maps." }, "smithy.api#http": { "uri": "/maps/v0/maps/{MapName}", @@ -8626,7 +8626,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties of a given place index resource.

", "smithy.api#endpoint": { - "hostPrefix": "places." + "hostPrefix": "cp.places." }, "smithy.api#http": { "uri": "/places/v0/indexes/{IndexName}", @@ -8724,7 +8724,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties for a given route calculator resource.

", "smithy.api#endpoint": { - "hostPrefix": "routes." + "hostPrefix": "cp.routes." }, "smithy.api#http": { "uri": "/routes/v0/calculators/{CalculatorName}", @@ -8816,7 +8816,7 @@ "traits": { "smithy.api#documentation": "

Updates the specified properties of a given tracker resource.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "cp.tracking." }, "smithy.api#http": { "uri": "/tracking/v0/trackers/{TrackerName}", diff --git a/models/machine-learning.json b/models/machine-learning.json index 9a75100130..670c659233 100644 --- a/models/machine-learning.json +++ b/models/machine-learning.json @@ -4313,7 +4313,8 @@ "smithy.api#length": { "min": 8, "max": 128 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.machinelearning#RDSDatabaseUsername": { @@ -4594,7 +4595,8 @@ "smithy.api#length": { "min": 8, "max": 64 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.machinelearning#RedshiftDatabaseUsername": { diff --git a/models/marketplace-catalog.json b/models/marketplace-catalog.json index ca72e87c52..dc1a455da8 100644 --- a/models/marketplace-catalog.json +++ b/models/marketplace-catalog.json @@ -834,7 +834,7 @@ "ChangeType": { "target": "com.amazonaws.marketplacecatalog#ChangeType", "traits": { - "smithy.api#documentation": "

Change types are single string values that describe your intention for the change.\n Each change type is unique for each EntityType provided in the change's\n scope. For more information on change types available for single-AMI products, see\n Working with single-AMI products. Also, for more information on change\n types available for container-based products, see Working with container products.

", + "smithy.api#documentation": "

Change types are single string values that describe your intention for the change.\n Each change type is unique for each EntityType provided in the change's\n scope. For more information on change types available for single-AMI products, see\n Working with single-AMI products. Also, for more information about change\n types available for container-based products, see Working with container products.

", "smithy.api#required": {} } }, @@ -854,8 +854,13 @@ "Details": { "target": "com.amazonaws.marketplacecatalog#Json", "traits": { - "smithy.api#documentation": "

This object contains details specific to the change type of the requested\n change. For more\n information on change types available for single-AMI products, see Working with single-AMI products. Also, for more information on change\n types available for container-based products, see Working with container products.

", - "smithy.api#required": {} + "smithy.api#documentation": "

This object contains details specific to the change type of the requested change. For\n more information about change types available for single-AMI products, see Working with single-AMI products. Also, for more information about change\n types available for container-based products, see Working with container products.

" + } + }, + "DetailsDocument": { + "target": "com.amazonaws.marketplacecatalog#JsonDocumentType", + "traits": { + "smithy.api#documentation": "

Alternative field that accepts a JSON value instead of a string for\n ChangeType details. You can use either Details or\n DetailsDocument, but not both.

" } }, "ChangeName": { @@ -1013,6 +1018,12 @@ "smithy.api#documentation": "

This object contains details specific to the change type of the requested\n change.

" } }, + "DetailsDocument": { + "target": "com.amazonaws.marketplacecatalog#JsonDocumentType", + "traits": { + "smithy.api#documentation": "

The JSON value of the details specific to the change type of the requested change.

" + } + }, "ErrorDetailList": { "target": "com.amazonaws.marketplacecatalog#ErrorDetailList", "traits": { @@ -1086,7 +1097,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a resource-based policy on an Entity that is identified by its resource\n ARN.

", + "smithy.api#documentation": "

Deletes a resource-based policy on an entity that is identified by its resource\n ARN.

", "smithy.api#http": { "method": "DELETE", "uri": "/DeleteResourcePolicy", @@ -1100,7 +1111,7 @@ "ResourceArn": { "target": "com.amazonaws.marketplacecatalog#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Entity resource that is associated with the\n resource policy.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the entity resource that is associated with the\n resource policy.

", "smithy.api#httpQuery": "resourceArn", "smithy.api#required": {} } @@ -1330,6 +1341,12 @@ "traits": { "smithy.api#documentation": "

This stringified JSON object includes the details of the entity.

" } + }, + "DetailsDocument": { + "target": "com.amazonaws.marketplacecatalog#JsonDocumentType", + "traits": { + "smithy.api#documentation": "

The JSON value of the details specific to the entity.

" + } } }, "traits": { @@ -1568,7 +1585,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a resource-based policy of an Entity that is identified by its resource\n ARN.

", + "smithy.api#documentation": "

Gets a resource-based policy of an entity that is identified by its resource\n ARN.

", "smithy.api#http": { "method": "GET", "uri": "/GetResourcePolicy", @@ -1582,7 +1599,7 @@ "ResourceArn": { "target": "com.amazonaws.marketplacecatalog#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Entity resource that is associated with the\n resource policy.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the entity resource that is associated with the\n resource policy.

", "smithy.api#httpQuery": "resourceArn", "smithy.api#required": {} } @@ -1639,6 +1656,9 @@ "smithy.api#pattern": "^[\\s]*\\{[\\s\\S]*\\}[\\s]*$" } }, + "com.amazonaws.marketplacecatalog#JsonDocumentType": { + "type": "document" + }, "com.amazonaws.marketplacecatalog#ListChangeSets": { "type": "operation", "input": { @@ -1835,7 +1855,10 @@ } }, "OwnershipType": { - "target": "com.amazonaws.marketplacecatalog#OwnershipType" + "target": "com.amazonaws.marketplacecatalog#OwnershipType", + "traits": { + "smithy.api#documentation": "

Filters the returned set of entities based on their owner. The default is\n SELF. To list entities shared with you\n through AWS Resource Access Manager (AWS RAM), set to SHARED. Entities shared through the AWS Marketplace\n Catalog API PutResourcePolicy operation can't be discovered through the\n SHARED parameter.

" + } } }, "traits": { @@ -1984,7 +2007,7 @@ } ], "traits": { - "smithy.api#documentation": "

Attaches a resource-based policy to an Entity. Examples of an entity include:\n AmiProduct and ContainerProduct.

", + "smithy.api#documentation": "

Attaches a resource-based policy to an entity. Examples of an entity include:\n AmiProduct and ContainerProduct.

", "smithy.api#http": { "method": "POST", "uri": "/PutResourcePolicy", @@ -1998,7 +2021,7 @@ "ResourceArn": { "target": "com.amazonaws.marketplacecatalog#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Entity resource you want to associate with a\n resource policy.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the entity resource you want to associate with a\n resource policy.

", "smithy.api#required": {} } }, @@ -2127,7 +2150,7 @@ "SortBy": { "target": "com.amazonaws.marketplacecatalog#SortBy", "traits": { - "smithy.api#documentation": "

For ListEntities, supported attributes include\n LastModifiedDate (default), Visibility,\n EntityId, and Name.

\n

For ListChangeSets, supported attributes include StartTime\n and EndTime.

" + "smithy.api#documentation": "

For ListEntities, supported attributes include\n LastModifiedDate (default) and EntityId. In addition to\n LastModifiedDate and EntityId, each\n EntityType might support additional fields.

\n

For ListChangeSets, supported attributes include StartTime\n and EndTime.

" } }, "SortOrder": { @@ -2200,7 +2223,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information on change types for single-AMI products, see Working with single-AMI products. Als, for more information on change types available for container-based products, see Working with container products.
", + "smithy.api#documentation": "

Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error. For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1). For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products.
", "smithy.api#http": { "method": "POST", "uri": "/StartChangeSet", diff --git a/models/mediatailor.json b/models/mediatailor.json index ff181eaf8b..b916048d1f 100644 --- a/models/mediatailor.json +++ b/models/mediatailor.json @@ -95,6 +95,31 @@ "target": "com.amazonaws.mediatailor#KeyValuePair" } }, + "com.amazonaws.mediatailor#AdBreakOpportunities": { + "type": "list", + "member": { + "target": "com.amazonaws.mediatailor#AdBreakOpportunity" + }, + "traits": { + "smithy.api#documentation": "

The list of ad break opportunities detected within the VOD source.

" + } + }, + "com.amazonaws.mediatailor#AdBreakOpportunity": { + "type": "structure", + "members": { + "OffsetMillis": { + "target": "com.amazonaws.mediatailor#__long", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The offset in milliseconds from the start of the VOD source at which an ad marker was detected.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A location at which a zero-duration ad marker was detected in a VOD source manifest.

" + } + }, "com.amazonaws.mediatailor#AdMarkerPassthrough": { "type": "structure", "members": { @@ -2205,6 +2230,12 @@ "com.amazonaws.mediatailor#DescribeVodSourceResponse": { "type": "structure", "members": { + "AdBreakOpportunities": { + "target": "com.amazonaws.mediatailor#AdBreakOpportunities", + "traits": { + "smithy.api#documentation": "

The ad break opportunities within the VOD source.
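Once this model is regenerated into SotoMediaTailor, reading the new field might look roughly like the sketch below; the source location and VOD source names are placeholders, and the property names are inferred from the members above.

```swift
import SotoMediaTailor

let client = AWSClient(httpClientProvider: .createNew)
let mediaTailor = MediaTailor(client: client, region: .useast1)

// Describe a VOD source and list the zero-duration ad markers that
// MediaTailor detected in its manifest.
let vodSource = try await mediaTailor.describeVodSource(.init(
    sourceLocationName: "my-source-location",   // placeholder
    vodSourceName: "my-vod-source"              // placeholder
))
for opportunity in vodSource.adBreakOpportunities ?? [] {
    print("ad break opportunity at \(opportunity.offsetMillis) ms")
}

try client.syncShutdown()
```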

" + } + }, "Arn": { "target": "com.amazonaws.mediatailor#__string", "traits": { diff --git a/models/mgn.json b/models/mgn.json index 80c6e633ae..2c694fdfda 100644 --- a/models/mgn.json +++ b/models/mgn.json @@ -355,6 +355,9 @@ { "target": "com.amazonaws.mgn#ApplicationResource" }, + { + "target": "com.amazonaws.mgn#ConnectorResource" + }, { "target": "com.amazonaws.mgn#ExportResource" }, @@ -1807,6 +1810,161 @@ "target": "com.amazonaws.mgn#ErrorDetails" } }, + "com.amazonaws.mgn#Connector": { + "type": "structure", + "members": { + "connectorID": { + "target": "com.amazonaws.mgn#ConnectorID", + "traits": { + "smithy.api#documentation": "

Connector ID.

" + } + }, + "name": { + "target": "com.amazonaws.mgn#ConnectorName", + "traits": { + "smithy.api#documentation": "

Connector name.

" + } + }, + "ssmInstanceID": { + "target": "com.amazonaws.mgn#SsmInstanceID", + "traits": { + "smithy.api#documentation": "

Connector SSM instance ID.

" + } + }, + "arn": { + "target": "com.amazonaws.mgn#ARN", + "traits": { + "smithy.api#documentation": "

Connector arn.

" + } + }, + "tags": { + "target": "com.amazonaws.mgn#TagsMap", + "traits": { + "smithy.api#documentation": "

Connector tags.

" + } + }, + "ssmCommandConfig": { + "target": "com.amazonaws.mgn#ConnectorSsmCommandConfig", + "traits": { + "smithy.api#documentation": "

Connector SSM command config.

" + } + } + }, + "traits": { + "smithy.api#references": [ + { + "resource": "com.amazonaws.mgn#ConnectorResource" + } + ] + } + }, + "com.amazonaws.mgn#ConnectorArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 27, + "max": 100 + }, + "smithy.api#pattern": "^arn:[\\w-]+:mgn:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:connector\\/(connector-[0-9a-zA-Z]{17})$" + } + }, + "com.amazonaws.mgn#ConnectorID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 27, + "max": 27 + }, + "smithy.api#pattern": "^connector-[0-9a-zA-Z]{17}$" + } + }, + "com.amazonaws.mgn#ConnectorIDsFilter": { + "type": "list", + "member": { + "target": "com.amazonaws.mgn#ConnectorID" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.mgn#ConnectorName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9_-]+$" + } + }, + "com.amazonaws.mgn#ConnectorResource": { + "type": "resource", + "identifiers": { + "connectorID": { + "target": "com.amazonaws.mgn#ConnectorID" + } + }, + "create": { + "target": "com.amazonaws.mgn#CreateConnector" + }, + "update": { + "target": "com.amazonaws.mgn#UpdateConnector" + }, + "delete": { + "target": "com.amazonaws.mgn#DeleteConnector" + }, + "list": { + "target": "com.amazonaws.mgn#ListConnectors" + }, + "traits": { + "aws.api#arn": { + "template": "connector/{connectorID}" + } + } + }, + "com.amazonaws.mgn#ConnectorSsmCommandConfig": { + "type": "structure", + "members": { + "s3OutputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Connector SSM command config S3 output enabled.

", + "smithy.api#required": {} + } + }, + "outputS3BucketName": { + "target": "com.amazonaws.mgn#S3BucketName", + "traits": { + "smithy.api#documentation": "

Connector SSM command config output S3 bucket name.

" + } + }, + "cloudWatchOutputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Connector SSM command config CloudWatch output enabled.

", + "smithy.api#required": {} + } + }, + "cloudWatchLogGroupName": { + "target": "com.amazonaws.mgn#CloudWatchLogGroupName", + "traits": { + "smithy.api#documentation": "

Connector SSM command config CloudWatch log group name.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Connector SSM command config.

" + } + }, + "com.amazonaws.mgn#ConnectorsList": { + "type": "list", + "member": { + "target": "com.amazonaws.mgn#Connector" + } + }, "com.amazonaws.mgn#Cpus": { "type": "list", "member": { @@ -1878,6 +2036,66 @@ } } }, + "com.amazonaws.mgn#CreateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.mgn#CreateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.mgn#Connector" + }, + "errors": [ + { + "target": "com.amazonaws.mgn#UninitializedAccountException" + }, + { + "target": "com.amazonaws.mgn#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Create Connector.

", + "smithy.api#http": { + "uri": "/CreateConnector", + "method": "POST", + "code": 201 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mgn#CreateConnectorRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.mgn#ConnectorName", + "traits": { + "smithy.api#documentation": "

Create Connector request name.

", + "smithy.api#required": {} + } + }, + "ssmInstanceID": { + "target": "com.amazonaws.mgn#SsmInstanceID", + "traits": { + "smithy.api#documentation": "

Create Connector request SSM instance ID.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.mgn#TagsMap", + "traits": { + "smithy.api#documentation": "

Create Connector request tags.

" + } + }, + "ssmCommandConfig": { + "target": "com.amazonaws.mgn#ConnectorSsmCommandConfig", + "traits": { + "smithy.api#documentation": "

Create Connector request SSM command config.
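A hedged sketch of how the new operation could be called from SotoMgn after regeneration; the connector name, output bucket, and SSM managed-instance ID are invented placeholders.

```swift
import SotoMgn

let client = AWSClient(httpClientProvider: .createNew)
let mgn = Mgn(client: client, region: .useast1)

// Register a connector backed by an SSM managed instance and send
// SSM command output to an S3 bucket.
let connector = try await mgn.createConnector(.init(
    name: "my-connector",                                 // placeholder
    ssmCommandConfig: .init(
        cloudWatchOutputEnabled: false,
        outputS3BucketName: "my-connector-output-bucket", // placeholder
        s3OutputEnabled: true
    ),
    ssmInstanceID: "mi-0123456789abcdef0"                 // placeholder managed instance
))
print(connector.connectorID ?? "no connector ID returned")

try client.syncShutdown()
```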

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.mgn#CreateLaunchConfigurationTemplate": { "type": "operation", "input": { @@ -2612,6 +2830,50 @@ "type": "structure", "members": {} }, + "com.amazonaws.mgn#DeleteConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.mgn#DeleteConnectorRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.mgn#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mgn#UninitializedAccountException" + }, + { + "target": "com.amazonaws.mgn#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete Connector.

", + "smithy.api#http": { + "uri": "/DeleteConnector", + "method": "POST", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mgn#DeleteConnectorRequest": { + "type": "structure", + "members": { + "connectorID": { + "target": "com.amazonaws.mgn#ConnectorID", + "traits": { + "smithy.api#documentation": "

Delete Connector request connector ID.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.mgn#DeleteJob": { "type": "operation", "input": { @@ -5602,6 +5864,99 @@ } } }, + "com.amazonaws.mgn#ListConnectors": { + "type": "operation", + "input": { + "target": "com.amazonaws.mgn#ListConnectorsRequest" + }, + "output": { + "target": "com.amazonaws.mgn#ListConnectorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mgn#UninitializedAccountException" + }, + { + "target": "com.amazonaws.mgn#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List Connectors.

", + "smithy.api#http": { + "uri": "/ListConnectors", + "method": "POST", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mgn#ListConnectorsRequest": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.mgn#ListConnectorsRequestFilters", + "traits": { + "smithy.api#documentation": "

List Connectors Request filters.

" + } + }, + "maxResults": { + "target": "com.amazonaws.mgn#MaxResultsType", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

List Connectors Request max results.

" + } + }, + "nextToken": { + "target": "com.amazonaws.mgn#PaginationToken", + "traits": { + "smithy.api#documentation": "

List Connectors Request next token.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mgn#ListConnectorsRequestFilters": { + "type": "structure", + "members": { + "connectorIDs": { + "target": "com.amazonaws.mgn#ConnectorIDsFilter", + "traits": { + "smithy.api#documentation": "

List Connectors Request Filters connector IDs.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

List Connectors Request Filters.

" + } + }, + "com.amazonaws.mgn#ListConnectorsResponse": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.mgn#ConnectorsList", + "traits": { + "smithy.api#documentation": "

List connectors response items.

" + } + }, + "nextToken": { + "target": "com.amazonaws.mgn#PaginationToken", + "traits": { + "smithy.api#documentation": "

List connectors response next token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.mgn#ListExportErrors": { "type": "operation", "input": { @@ -7691,6 +8046,16 @@ } } }, + "com.amazonaws.mgn#SecretArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 100 + }, + "smithy.api#pattern": "^arn:[\\w-]+:secretsmanager:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:secret:(.+)$" + } + }, "com.amazonaws.mgn#SecurityGroupID": { "type": "string", "traits": { @@ -7894,6 +8259,12 @@ "traits": { "smithy.api#documentation": "

Source server fqdn for action framework.

" } + }, + "connectorAction": { + "target": "com.amazonaws.mgn#SourceServerConnectorAction", + "traits": { + "smithy.api#documentation": "

Source Server connector action.

" + } } }, "traits": { @@ -8009,6 +8380,26 @@ "smithy.api#documentation": "

Source server post migration custom action filters.

" } }, + "com.amazonaws.mgn#SourceServerConnectorAction": { + "type": "structure", + "members": { + "credentialsSecretArn": { + "target": "com.amazonaws.mgn#SecretArn", + "traits": { + "smithy.api#documentation": "

Source Server connector action credentials secret arn.

" + } + }, + "connectorArn": { + "target": "com.amazonaws.mgn#ConnectorArn", + "traits": { + "smithy.api#documentation": "

Source Server connector action connector arn.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Source Server connector action.

" + } + }, "com.amazonaws.mgn#SourceServerID": { "type": "string", "traits": { @@ -8026,6 +8417,9 @@ "target": "com.amazonaws.mgn#SourceServerID" } }, + "update": { + "target": "com.amazonaws.mgn#UpdateSourceServer" + }, "delete": { "target": "com.amazonaws.mgn#DeleteSourceServer" }, @@ -8246,6 +8640,16 @@ "smithy.api#documentation": "

AWS Systems Manager Document external parameter.

" } }, + "com.amazonaws.mgn#SsmInstanceID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 19, + "max": 20 + }, + "smithy.api#pattern": "(^i-[0-9a-zA-Z]{17}$)|(^mi-[0-9a-zA-Z]{17}$)" + } + }, "com.amazonaws.mgn#SsmParameterStoreParameter": { "type": "structure", "members": { @@ -9282,6 +9686,62 @@ } } }, + "com.amazonaws.mgn#UpdateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.mgn#UpdateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.mgn#Connector" + }, + "errors": [ + { + "target": "com.amazonaws.mgn#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mgn#UninitializedAccountException" + }, + { + "target": "com.amazonaws.mgn#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update Connector.

", + "smithy.api#http": { + "uri": "/UpdateConnector", + "method": "POST", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mgn#UpdateConnectorRequest": { + "type": "structure", + "members": { + "connectorID": { + "target": "com.amazonaws.mgn#ConnectorID", + "traits": { + "smithy.api#documentation": "

Update Connector request connector ID.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.mgn#ConnectorName", + "traits": { + "smithy.api#documentation": "

Update Connector request name.

" + } + }, + "ssmCommandConfig": { + "target": "com.amazonaws.mgn#ConnectorSsmCommandConfig", + "traits": { + "smithy.api#documentation": "

Update Connector request SSM command config.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.mgn#UpdateLaunchConfiguration": { "type": "operation", "input": { @@ -9780,6 +10240,35 @@ } } }, + "com.amazonaws.mgn#UpdateSourceServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.mgn#UpdateSourceServerRequest" + }, + "output": { + "target": "com.amazonaws.mgn#SourceServer" + }, + "errors": [ + { + "target": "com.amazonaws.mgn#ConflictException" + }, + { + "target": "com.amazonaws.mgn#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mgn#UninitializedAccountException" + } + ], + "traits": { + "smithy.api#documentation": "

Update Source Server.

", + "smithy.api#http": { + "uri": "/UpdateSourceServer", + "method": "POST", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, "com.amazonaws.mgn#UpdateSourceServerReplicationType": { "type": "operation", "input": { @@ -9836,6 +10325,33 @@ } } }, + "com.amazonaws.mgn#UpdateSourceServerRequest": { + "type": "structure", + "members": { + "accountID": { + "target": "com.amazonaws.mgn#AccountID", + "traits": { + "smithy.api#documentation": "

Update Source Server request account ID.

" + } + }, + "sourceServerID": { + "target": "com.amazonaws.mgn#SourceServerID", + "traits": { + "smithy.api#documentation": "

Update Source Server request source server ID.

", + "smithy.api#required": {} + } + }, + "connectorAction": { + "target": "com.amazonaws.mgn#SourceServerConnectorAction", + "traits": { + "smithy.api#documentation": "

Update Source Server request connector action.
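Equally hypothetical, attaching a connector to a source server through the new UpdateSourceServer operation might look like this; every ARN and ID below is a placeholder.

```swift
import SotoMgn

let client = AWSClient(httpClientProvider: .createNew)
let mgn = Mgn(client: client, region: .useast1)

// Point a source server at a connector and the secret holding its credentials.
let sourceServer = try await mgn.updateSourceServer(.init(
    connectorAction: .init(
        connectorArn: "arn:aws:mgn:us-east-1:111122223333:connector/connector-0123456789abcdef0",
        credentialsSecretArn: "arn:aws:secretsmanager:us-east-1:111122223333:secret:my-connector-credentials"
    ),
    sourceServerID: "s-0123456789abcdef0"
))
print(sourceServer.sourceServerID ?? "")

try client.syncShutdown()
```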

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.mgn#UpdateWave": { "type": "operation", "input": { diff --git a/models/omics.json b/models/omics.json index 6197c45950..43a9fce846 100644 --- a/models/omics.json +++ b/models/omics.json @@ -36,6 +36,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to abort multipart read set uploads", "smithy.api#documentation": "

\n Stops a multipart upload.\n

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -820,6 +821,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to batch delete Read Sets in the given Sequence Store", "smithy.api#documentation": "

Deletes one or more read sets.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -1106,6 +1108,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to complete a multipart read set upload", "smithy.api#documentation": "

\n Concludes a multipart upload once you have uploaded all the components.\n

", "smithy.api#endpoint": { "hostPrefix": "storage-" @@ -1559,6 +1562,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to create a multipart read set upload", "smithy.api#documentation": "

\n Begins a multipart read set upload. \n

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -1755,6 +1759,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to create a Reference Store", "aws.iam#conditionKeys": [ "aws:RequestTag/${TagKey}", "aws:TagKeys" @@ -2025,6 +2030,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to create a Sequence Store", "aws.iam#conditionKeys": [ "aws:RequestTag/${TagKey}", "aws:TagKeys" @@ -2737,6 +2743,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to delete a Reference in the given Reference Store", "smithy.api#documentation": "

Deletes a genome reference.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -2812,6 +2819,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to delete a Reference Store", "smithy.api#documentation": "

Deletes a genome reference store.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -3005,6 +3013,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to delete a Sequence Store", "smithy.api#documentation": "

Deletes a sequence store.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -3266,6 +3275,51 @@ } } }, + "com.amazonaws.omics#ETag": { + "type": "structure", + "members": { + "algorithm": { + "target": "com.amazonaws.omics#ETagAlgorithm", + "traits": { + "smithy.api#documentation": "

\n The algorithm used to calculate the read set’s ETag(s).

" + } + }, + "source1": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

\n The ETag hash calculated on Source1 of the read set.\n

" + } + }, + "source2": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

\n The ETag hash calculated on Source2 of the read set.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The entity tag (ETag) is a hash of the object representing its semantic content.\n

" + } + }, + "com.amazonaws.omics#ETagAlgorithm": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FASTQ_MD5up", + "name": "FASTQ_MD5UP" + }, + { + "value": "BAM_MD5up", + "name": "BAM_MD5UP" + }, + { + "value": "CRAM_MD5up", + "name": "CRAM_MD5UP" + } + ] + } + }, "com.amazonaws.omics#Encoding": { "type": "string", "traits": { @@ -4340,6 +4394,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get a Read Set in the given Sequence Store", "smithy.api#documentation": "

Gets a file from a read set.

", "smithy.api#endpoint": { "hostPrefix": "storage-" @@ -4380,6 +4435,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Read Set activation job for the given Sequence Store", "smithy.api#documentation": "

Gets information about a read set activation job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -4579,6 +4635,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Read Set export job for the given Sequence Store", "smithy.api#documentation": "

Gets information about a read set export job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -4785,6 +4842,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Read Set import job for the given Sequence Store", "smithy.api#documentation": "

Gets information about a read set import job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -4992,6 +5050,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Read Set in the given Sequence Store", "smithy.api#documentation": "

Gets details about a read set.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -5127,6 +5186,12 @@ "traits": { "smithy.api#documentation": "

\n The creation type of the read set.\n

" } + }, + "etag": { + "target": "com.amazonaws.omics#ETag", + "traits": { + "smithy.api#documentation": "

\n The entity tag (ETag) is a hash of the object meant to represent its semantic content.\n
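As an illustration only, the new field might be read from SotoOmics like this once the bindings are regenerated; the sequence store and read set IDs are placeholders.

```swift
import SotoOmics

let client = AWSClient(httpClientProvider: .createNew)
let omics = Omics(client: client, region: .useast1)

// Fetch read set metadata and inspect the semantic-content ETag.
let metadata = try await omics.getReadSetMetadata(.init(
    id: "0123456789",              // placeholder read set ID
    sequenceStoreId: "1234567890"  // placeholder sequence store ID
))
if let etag = metadata.etag {
    print("algorithm: \(etag.algorithm?.rawValue ?? "unknown")")
    print("source1:   \(etag.source1 ?? "-")")
    print("source2:   \(etag.source2 ?? "-")")
}

try client.syncShutdown()
```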

" + } } }, "traits": { @@ -5225,6 +5290,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get a Reference in the given Reference Store", "smithy.api#documentation": "

Gets a reference file.

", "smithy.api#endpoint": { "hostPrefix": "storage-" @@ -5265,6 +5331,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Reference import job for the given Reference Store", "smithy.api#documentation": "

Gets information about a reference import job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -5472,6 +5539,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Reference in the given Reference Store", "smithy.api#documentation": "

Gets information about a genome reference's metadata.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -5680,6 +5748,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Reference Store", "smithy.api#documentation": "

Gets information about a reference store.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -6574,6 +6643,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to get details about a Sequence Store", "smithy.api#documentation": "

Gets information about a sequence store.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8213,6 +8283,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list multipart read set uploads", "smithy.api#documentation": "

\n Lists all multipart read set uploads and their statuses. \n

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8313,6 +8384,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Read Set activation jobs for the given Sequence Store", "smithy.api#documentation": "

Retrieves a list of read set activation jobs.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8419,6 +8491,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Read Set export jobs for the given Sequence Store", "smithy.api#documentation": "

Retrieves a list of read set export jobs.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8525,6 +8598,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Read Set import jobs for the given Sequence Store", "smithy.api#documentation": "

Retrieves a list of read set import jobs.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8637,6 +8711,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list read set upload parts", "smithy.api#documentation": "

\n This operation will list all parts in a requested multipart upload for a sequence store.\n

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8758,6 +8833,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Read Sets in the given Sequence Store", "smithy.api#documentation": "

Retrieves a list of read sets.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8865,6 +8941,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Reference import jobs for the given Reference Store", "smithy.api#documentation": "

Retrieves a list of reference import jobs.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -8968,6 +9045,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Reference Stores", "smithy.api#documentation": "

Retrieves a list of reference stores.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -9067,6 +9145,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list References in the given Reference Store", "smithy.api#documentation": "

Retrieves a list of references.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -9511,6 +9590,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to list Sequence Stores", "smithy.api#documentation": "

Retrieves a list of sequence stores.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -11562,6 +11642,12 @@ "traits": { "smithy.api#documentation": "

\n The creation type of the read set.\n

" } + }, + "etag": { + "target": "com.amazonaws.omics#ETag", + "traits": { + "smithy.api#documentation": "

\n The entity tag (ETag) is a hash of the object representing its semantic content.\n

" + } } }, "traits": { @@ -11639,7 +11725,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "readSet" - } + }, + "smithy.api#documentation": "Represents a resource that stores a single genomic sequence file" } }, "com.amazonaws.omics#ReadSetStatus": { @@ -12073,7 +12160,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "reference" - } + }, + "smithy.api#documentation": "Represents a resource that stores a single genomic reference file" } }, "com.amazonaws.omics#ReferenceStatus": { @@ -12265,7 +12353,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "referenceStore" - } + }, + "smithy.api#documentation": "Represents a resource that stores genomic reference data" } }, "com.amazonaws.omics#ReferenceStreamingBlob": { @@ -13225,7 +13314,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "sequenceStore" - } + }, + "smithy.api#documentation": "Represents a resource that stores genomic sequence data" } }, "com.amazonaws.omics#ServiceQuotaExceededException": { @@ -13570,6 +13660,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to start a Read Set activation job from the given Sequence Store", "smithy.api#documentation": "

Activates an archived read set. To reduce storage charges, Amazon Omics archives unused read\n sets after 30 days.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -13703,6 +13794,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to start a Read Set export job from the given Sequence Store", "smithy.api#documentation": "

Exports a read set to Amazon S3.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -13836,6 +13928,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to start a Read Set import job into the given Sequence Store", "smithy.api#documentation": "

Starts a read set import job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -14035,6 +14128,7 @@ } ], "traits": { + "aws.iam#actionPermissionDescription": "Grants permission to start a Reference import job into the given Reference Store", "smithy.api#documentation": "

Starts a reference import job.

", "smithy.api#endpoint": { "hostPrefix": "control-storage-" @@ -15682,6 +15776,7 @@ ], "traits": { "aws.auth#unsignedPayload": {}, + "aws.iam#actionPermissionDescription": "Grants permission to upload read set parts", "smithy.api#documentation": "

\n This operation uploads a specific part of a read set. If you upload a new part using a previously used part number, the previously uploaded part will be overwritten. \n

", "smithy.api#endpoint": { "hostPrefix": "storage-" diff --git a/models/pricing.json b/models/pricing.json index 34e6696499..b934d7b807 100644 --- a/models/pricing.json +++ b/models/pricing.json @@ -33,7 +33,7 @@ "name": "pricing" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

The Amazon Web Services Price List API is a centralized and convenient way to programmatically\n query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location,\n Storage Class, and Operating System, and provides prices at\n the SKU level. You can use the Amazon Web Services Price List to do the following:

\n
    \n
  • \n

    Build cost control and scenario planning tools

    \n
  • \n
  • \n

    Reconcile billing data

    \n
  • \n
  • \n

    Forecast future spend for budgeting purposes

    \n
  • \n
  • \n

    Provide cost benefit analysis that compare your internal workloads with Amazon Web Services

    \n
  • \n
\n

Use GetServices without a service code to retrieve the service codes for\n all Amazon Web Services, then GetServices with a service code to\n retrieve the attribute names for that service. After you have the service code and\n attribute names, you can use GetAttributeValues to see what values are\n available for an attribute. With the service code and an attribute name and value, you can\n use GetProducts to find specific products that you're interested in, such as\n an AmazonEC2 instance, with a Provisioned IOPS\n volumeType.

\n

You can use the following endpoints for the Amazon Web Services Price List API:

\n
    \n
  • \n

    https://api.pricing.us-east-1.amazonaws.com

    \n
  • \n
  • \n

    https://api.pricing.ap-south-1.amazonaws.com

    \n
  • \n
", + "smithy.api#documentation": "

The Amazon Web Services Price List API is a centralized and convenient way to programmatically\n query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location,\n Storage Class, and Operating System, and provides prices at\n the SKU level. You can use the Amazon Web Services Price List to do the following:

\n
    \n
  • \n

    Build cost control and scenario planning tools

    \n
  • \n
  • \n

    Reconcile billing data

    \n
  • \n
  • \n

    Forecast future spend for budgeting purposes

    \n
  • \n
  • \n

    Provide cost benefit analysis that compare your internal workloads with Amazon Web Services

    \n
  • \n
\n

Use GetServices without a service code to retrieve the service codes for\n all Amazon Web Services, then GetServices with a service code to\n retrieve the attribute names for that service. After you have the service code and\n attribute names, you can use GetAttributeValues to see what values are\n available for an attribute. With the service code and an attribute name and value, you can\n use GetProducts to find specific products that you're interested in, such as\n an AmazonEC2 instance, with a Provisioned IOPS\n volumeType.

\n

For more information, see Using the\n Amazon Web Services Price List API in the Billing User\n Guide.

", "smithy.api#title": "AWS Price List Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1042,7 +1042,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n \n This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).\n \n

\n

This returns the URL that you can retrieve your Price List file from. This URL is based on\n the PriceListArn and FileFormat that you retrieve from the \n ListPriceLists\n response.

" + "smithy.api#documentation": "

\n \n This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).\n \n

\n

This returns the URL that you can retrieve your Price List file from. This URL is based\n on the PriceListArn and FileFormat that you retrieve from the\n ListPriceLists response.

" } }, "com.amazonaws.pricing#GetPriceListFileUrlRequest": { @@ -1051,14 +1051,14 @@ "PriceListArn": { "target": "com.amazonaws.pricing#PriceListArn", "traits": { - "smithy.api#documentation": "

The unique identifier that maps to where your Price List files are located.\n PriceListArn can be obtained from the \n ListPriceLists\n response.

", + "smithy.api#documentation": "

The unique identifier that maps to where your Price List files are located.\n PriceListArn can be obtained from the ListPriceLists response.

", "smithy.api#required": {} } }, "FileFormat": { "target": "com.amazonaws.pricing#FileFormat", "traits": { - "smithy.api#documentation": "

The format that you want to retrieve your Price List files in. The\n FileFormat can be obtained from the \n ListPriceLists\n response.

", + "smithy.api#documentation": "

The format that you want to retrieve your Price List files in. The\n FileFormat can be obtained from the ListPriceLists response.

", "smithy.api#required": {} } } @@ -1250,7 +1250,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n \n This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).\n \n

\n

This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate.\n Use without a RegionCode filter to list Price List references from all\n available Amazon Web Services Regions. Use with a RegionCode filter to get the\n Price List reference that's specific to a specific Amazon Web Services Region. You can use\n the PriceListArn from the response to get your preferred Price List files\n through the \n GetPriceListFileUrl\n API.

", + "smithy.api#documentation": "

\n \n This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).\n \n

\n

This returns a list of Price List references that the requester is authorized to view,\n given a ServiceCode, CurrencyCode, and an\n EffectiveDate. Use without a RegionCode filter to list Price\n List references from all available Amazon Web Services Regions. Use with a\n RegionCode filter to get the Price List reference that's specific to a\n particular Amazon Web Services Region. You can use the PriceListArn from the\n response to get your preferred Price List files through the GetPriceListFileUrl API.
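A sketch of that two-step flow with SotoPricing, assuming the usual generated Swift API; the service code, Region, and file format values are only examples.

```swift
import Foundation
import SotoPricing

let client = AWSClient(httpClientProvider: .createNew)
// The Price List API is served from a small set of Regions, e.g. us-east-1.
let pricing = Pricing(client: client, region: .useast1)

// 1. List Price List references for Amazon EC2 in a single Region.
let lists = try await pricing.listPriceLists(.init(
    currencyCode: "USD",
    effectiveDate: Date(),
    regionCode: "us-east-1",
    serviceCode: "AmazonEC2"
))

// 2. Resolve a download URL for the first reference, if any.
if let arn = lists.priceLists?.first?.priceListArn {
    let fileUrl = try await pricing.getPriceListFileUrl(.init(fileFormat: "json", priceListArn: arn))
    print(fileUrl.url ?? "no URL returned")
}

try client.syncShutdown()
```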

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -1265,7 +1265,7 @@ "ServiceCode": { "target": "com.amazonaws.pricing#ServiceCode", "traits": { - "smithy.api#documentation": "

The service code or the Savings Plan service code for the attributes that\n you want to retrieve. For example, to get the list of applicable Amazon EC2 price lists, use\n AmazonEC2. For a full list of service codes containing On-Demand and\n Reserved Instance (RI) pricing, use the \n DescribeServices\n API.

\n

To retrieve the Compute Savings Plan price lists, use ComputeSavingsPlans. To retrieve Machine Learning Savings Plans price lists, use MachineLearningSavingsPlans.\n

", + "smithy.api#documentation": "

The service code or the Savings Plan service code for the attributes that\n you want to retrieve. For example, to get the list of applicable Amazon EC2 price\n lists, use AmazonEC2. For a full list of service codes containing On-Demand\n and Reserved Instance (RI) pricing, use the DescribeServices API.

\n

To retrieve the Reserved Instance and Compute Savings Plan price lists,\n use ComputeSavingsPlans.

\n

To retrieve Machine Learning Savings Plans price lists, use\n MachineLearningSavingsPlans.

", "smithy.api#required": {} } }, @@ -1279,7 +1279,7 @@ "RegionCode": { "target": "com.amazonaws.pricing#RegionCode", "traits": { - "smithy.api#documentation": "

This is used to filter the Price List by Amazon Web Services Region. For example, to get\n the price list only for the US East (N. Virginia) Region, use\n us-east-1. If nothing is specified, you retrieve price lists for all\n applicable Regions. The available RegionCode list can be retrieved from \n GetAttributeValues\n API.

" + "smithy.api#documentation": "

This is used to filter the Price List by Amazon Web Services Region. For example, to get\n the price list only for the US East (N. Virginia) Region, use\n us-east-1. If nothing is specified, you retrieve price lists for all\n applicable Regions. The available RegionCode list can be retrieved from the GetAttributeValues API.

" } }, "CurrencyCode": { diff --git a/models/quicksight.json b/models/quicksight.json index 73ef12cf8a..88d091ac78 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -6050,6 +6050,12 @@ "traits": { "smithy.api#documentation": "

The definition of an analysis.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

\n

Either a SourceEntity or a Definition must be provided in \n order for the request to be valid.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to create an analysis with definition objects. This skips the validation step for specific errors.

" + } } }, "traits": { @@ -6227,6 +6233,12 @@ "traits": { "smithy.api#documentation": "

The definition of a dashboard.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

\n

Either a SourceEntity or a Definition must be provided in \n order for the request to be valid.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to create a dashboard with definition objects. This option skips the validation step for specific errors.

" + } } }, "traits": { @@ -7811,6 +7823,12 @@ "traits": { "smithy.api#documentation": "

The definition of a template.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

\n

Either a SourceEntity or a Definition must be provided in \n\t\t\torder for the request to be valid.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to create a template with definition objects. This skips the validation step for specific errors.

" + } } }, "traits": { @@ -11334,6 +11352,36 @@ } } }, + "com.amazonaws.quicksight#DatabaseGroup": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.quicksight#DatabaseGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#DatabaseGroup" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.quicksight#DatabaseUser": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, "com.amazonaws.quicksight#DatabricksParameters": { "type": "structure", "members": { @@ -20540,6 +20588,12 @@ "traits": { "smithy.api#documentation": "

Select all of the values. Null is not the assigned value of select all.

  • FILTER_ALL_VALUES
" } + }, + "NullOption": { + "target": "com.amazonaws.quicksight#FilterNullOption", + "traits": { + "smithy.api#documentation": "

This option determines how null values should be treated when filtering data.

  • ALL_VALUES: Include null values in filtered results.
  • NULLS_ONLY: Only include null values in filtered results.
  • NON_NULLS_ONLY: Exclude null values from filtered results.
" + } } }, "traits": { @@ -30288,6 +30342,12 @@ "smithy.api#enumValue": "DATASET" } }, + "DATASOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATASOURCE" + } + }, "TOPIC": { "target": "smithy.api#Unit", "traits": { @@ -35792,6 +35852,41 @@ } } }, + "com.amazonaws.quicksight#RedshiftIAMParameters": { + "type": "structure", + "members": { + "RoleArn": { + "target": "com.amazonaws.quicksight#RoleArn", + "traits": { + "smithy.api#documentation": "

Use the RoleArn structure to allow Amazon QuickSight to call redshift:GetClusterCredentials on your cluster. The calling principal must have iam:PassRole access to pass the role to Amazon QuickSight. The role's trust policy must allow the Amazon QuickSight service principal to assume the role.

", + "smithy.api#required": {} + } + }, + "DatabaseUser": { + "target": "com.amazonaws.quicksight#DatabaseUser", + "traits": { + "smithy.api#documentation": "

The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser to True to create a new user with PUBLIC permissions.

", + "smithy.api#required": {} + } + }, + "DatabaseGroups": { + "target": "com.amazonaws.quicksight#DatabaseGroupList", + "traits": { + "smithy.api#documentation": "

A list of groups whose permissions will be granted to Amazon QuickSight to access the cluster. These permissions are combined with the permissions granted to Amazon QuickSight by the DatabaseUser. If you choose to include this parameter, the RoleArn must grant access to redshift:JoinGroup.

" + } + }, + "AutoCreateDatabaseUser": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Automatically creates a database user. If your database doesn't have a DatabaseUser, set this parameter to True. If there is no DatabaseUser, Amazon QuickSight can't connect to your cluster. The RoleArn that you use for this operation must grant access to redshift:CreateClusterUser to successfully create the user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that grants Amazon QuickSight access to your cluster and makes a call to the redshift:GetClusterCredentials API. For more information on the redshift:GetClusterCredentials API, see \n GetClusterCredentials\n .

" + } + }, "com.amazonaws.quicksight#RedshiftParameters": { "type": "structure", "members": { @@ -35820,10 +35915,16 @@ "traits": { "smithy.api#documentation": "

Cluster ID. This field can be blank if the Host and Port are\n provided.

" } + }, + "IAMParameters": { + "target": "com.amazonaws.quicksight#RedshiftIAMParameters", + "traits": { + "smithy.api#documentation": "

An optional parameter that uses IAM authentication to grant Amazon QuickSight access to your cluster. This parameter can be used instead of DataSourceCredentials.
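For example, a Redshift data source definition that relies on IAM run-as credentials instead of a stored password might be built like this; the cluster ID, database, group, and role ARN are placeholders, and the property names are inferred from the members above.

```swift
import SotoQuickSight

// Redshift connection parameters that rely on IAM run-as credentials
// rather than a stored database password.
let redshiftParameters = QuickSight.RedshiftParameters(
    clusterId: "my-redshift-cluster",            // placeholder
    database: "dev",
    iamParameters: .init(
        autoCreateDatabaseUser: true,
        databaseGroups: ["quicksight_readers"],  // placeholder group
        databaseUser: "quicksight",
        roleArn: "arn:aws:iam::111122223333:role/QuickSightRedshiftAccess"
    )
)
// `redshiftParameters` would then be supplied as the data source's
// parameters in a CreateDataSource or UpdateDataSource call.
print(redshiftParameters)
```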

" + } } }, "traits": { - "smithy.api#documentation": "

The parameters for Amazon Redshift. The ClusterId field can be blank if\n Host and Port are both set. The Host and\n Port fields can be blank if the ClusterId field is set.

" + "smithy.api#documentation": "

The parameters for Amazon Redshift. The ClusterId field can be blank if\n Host and Port are both set. The Host and Port fields can be blank if the ClusterId field is set.

" } }, "com.amazonaws.quicksight#ReferenceLine": { @@ -36623,7 +36724,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 64 + "max": 256 } } }, @@ -36632,7 +36733,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 64 + "max": 256 } } }, @@ -42168,6 +42269,12 @@ "traits": { "smithy.api#enumValue": "END" } + }, + "AUTO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTO" + } } } }, @@ -46101,6 +46208,12 @@ "traits": { "smithy.api#documentation": "

The definition of an analysis.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to update an analysis with definition objects. This skips the validation step for specific errors.

" + } } }, "traits": { @@ -46485,6 +46598,12 @@ "traits": { "smithy.api#documentation": "

The definition of a dashboard.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to update a dashboard with definition objects. This skips the validation step for specific errors.

" + } } }, "traits": { @@ -48138,6 +48257,12 @@ "traits": { "smithy.api#documentation": "

The definition of a template.

\n

A definition is the data model of all features in a Dashboard, Template, or Analysis.

" } + }, + "ValidationStrategy": { + "target": "com.amazonaws.quicksight#ValidationStrategy", + "traits": { + "smithy.api#documentation": "

The option to relax the validation needed to update a template with definition objects. This skips the validation step for specific errors.

" + } } }, "traits": { @@ -49630,6 +49755,38 @@ "target": "com.amazonaws.quicksight#VPCConnectionSummary" } }, + "com.amazonaws.quicksight#ValidationStrategy": { + "type": "structure", + "members": { + "Mode": { + "target": "com.amazonaws.quicksight#ValidationStrategyMode", + "traits": { + "smithy.api#documentation": "

The mode of validation for the asset to be created or updated. When you set this value to STRICT, strict validation for every error is enforced. When you set this value to LENIENT, validation is skipped for specific UI errors.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The option to relax the validation that is required to create and update analyses, dashboards, and templates with definition objects. When you set this value to LENIENT, validation is skipped for specific errors.
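A sketch of opting into LENIENT validation when updating an analysis from a definition; the account ID, analysis ID, and the deliberately minimal definition are placeholders.

```swift
import SotoQuickSight

let client = AWSClient(httpClientProvider: .createNew)
let quickSight = QuickSight(client: client, region: .useast1)

// A deliberately minimal definition; a real one would carry sheets,
// calculated fields, filters, and so on.
let definition = QuickSight.AnalysisDefinition(
    dataSetIdentifierDeclarations: [
        .init(
            dataSetArn: "arn:aws:quicksight:us-east-1:111122223333:dataset/example-data-set",
            identifier: "main"
        )
    ]
)

// LENIENT mode asks the service to skip validation for specific errors
// instead of rejecting the whole update.
_ = try await quickSight.updateAnalysis(.init(
    analysisId: "example-analysis-id",
    awsAccountId: "111122223333",
    definition: definition,
    name: "Sales analysis",
    validationStrategy: .init(mode: .lenient)
))

try client.syncShutdown()
```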

" + } + }, + "com.amazonaws.quicksight#ValidationStrategyMode": { + "type": "enum", + "members": { + "STRICT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRICT" + } + }, + "LENIENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LENIENT" + } + } + } + }, "com.amazonaws.quicksight#ValueWhenUnsetOption": { "type": "enum", "members": { diff --git a/models/rds.json b/models/rds.json index 53241d6321..b7039114b4 100644 --- a/models/rds.json +++ b/models/rds.json @@ -2009,14 +2009,14 @@ "ApplyAction": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The pending maintenance action to apply to this resource.

\n

Valid values: system-update, db-upgrade, \n hardware-maintenance, ca-certificate-rotation\n

", + "smithy.api#documentation": "

The pending maintenance action to apply to this resource.

\n

Valid Values: system-update, db-upgrade, \n hardware-maintenance, ca-certificate-rotation\n

", "smithy.api#required": {} } }, "OptInType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in \n request of type immediate can't be undone.

\n

Valid values:

\n
    \n
  • \n

    \n immediate - Apply the maintenance action immediately.

    \n
  • \n
  • \n

    \n next-maintenance - Apply the maintenance action during\n the next maintenance window for the resource.

    \n
  • \n
  • \n

    \n undo-opt-in - Cancel any existing next-maintenance\n opt-in requests.

    \n
  • \n
", + "smithy.api#documentation": "

A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in \n request of type immediate can't be undone.

Valid Values:
  • immediate - Apply the maintenance action immediately.
  • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.
  • undo-opt-in - Cancel any existing next-maintenance opt-in requests.
", "smithy.api#required": {} } } @@ -2349,13 +2349,13 @@ "Force": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to force the DB cluster to backtrack when binary logging is\n enabled. Otherwise, an error occurs when binary logging is enabled.

" + "smithy.api#documentation": "

Specifies whether to force the DB cluster to backtrack when binary logging is\n enabled. Otherwise, an error occurs when binary logging is enabled.

" } }, "UseEarliestTimeOnPointInTimeUnavailable": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to backtrack the DB cluster to the earliest possible\n backtrack time when BacktrackTo is set to a timestamp earlier than the earliest\n backtrack time. When this parameter is disabled and BacktrackTo is set to a timestamp earlier than the earliest\n backtrack time, an error occurs.

" + "smithy.api#documentation": "

Specifies whether to backtrack the DB cluster to the earliest possible\n backtrack time when BacktrackTo is set to a timestamp earlier than the earliest\n backtrack time. When this parameter is disabled and BacktrackTo is set to a timestamp earlier than the earliest\n backtrack time, an error occurs.

" } } }, @@ -2675,7 +2675,7 @@ "CustomerOverride": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Whether there is an override for the default certificate identifier.

" + "smithy.api#documentation": "

Indicates whether there is an override for the default certificate identifier.

" } }, "CustomerOverrideValidTill": { @@ -2852,7 +2852,7 @@ "IAMDatabaseAuthenticationEnabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

" + "smithy.api#documentation": "

Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

" } }, "EngineVersion": { @@ -2896,19 +2896,19 @@ "MaxConnectionsPercent": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The maximum size of the connection pool for each target in a target group. The value is expressed as a percentage of the\n max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.

\n

If you specify MaxIdleConnectionsPercent, then you must also include a value for this parameter.

\n

Default: 10 for RDS for Microsoft SQL Server, and 100 for all other engines

\n

Constraints: Must be between 1 and 100.

" + "smithy.api#documentation": "

The maximum size of the connection pool for each target in a target group. The value is expressed as a percentage of the\n max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.

\n

If you specify MaxIdleConnectionsPercent, then you must also include a value for this parameter.

\n

Default: 10 for RDS for Microsoft SQL Server, and 100 for all other engines

\n

Constraints:

\n
    \n
  • \n

    Must be between 1 and 100.

    \n
  • \n
" } }, "MaxIdleConnectionsPercent": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

Controls how actively the proxy closes idle database connections in the connection pool.\n The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.\n With a high value, the proxy leaves a high percentage of idle database connections open. A low value causes the proxy to close more idle connections and return them to the database.

\n

If you specify this parameter, then you must also include a value for MaxConnectionsPercent.

\n

Default: The default value is half of the value of MaxConnectionsPercent. For example, if MaxConnectionsPercent is 80, then the default value of \n MaxIdleConnectionsPercent is 40. If the value of MaxConnectionsPercent isn't specified, then for SQL Server, MaxIdleConnectionsPercent is 5, and \n for all other engines, the default is 50.

\n

Constraints: Must be between 0 and the value of MaxConnectionsPercent.

" + "smithy.api#documentation": "

A value that controls how actively the proxy closes idle database connections in the connection pool.\n The value is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.\n With a high value, the proxy leaves a high percentage of idle database connections open. A low value causes the proxy to close more idle connections and return them to the database.

\n

If you specify this parameter, then you must also include a value for MaxConnectionsPercent.

\n

Default: The default value is half of the value of MaxConnectionsPercent. For example, if MaxConnectionsPercent is 80, then the default value of \n MaxIdleConnectionsPercent is 40. If the value of MaxConnectionsPercent isn't specified, then for SQL Server, MaxIdleConnectionsPercent is 5, and \n for all other engines, the default is 50.

\n

Constraints:

\n
    \n
  • \n

    Must be between 0 and the value of MaxConnectionsPercent.

    \n
  • \n
" } }, "ConnectionBorrowTimeout": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the\n proxy has opened its maximum number of connections and all connections are busy with client sessions.

\n

Default: 120

\n

Constraints: between 1 and 3600, or 0 representing unlimited

" + "smithy.api#documentation": "

The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the\n proxy has opened its maximum number of connections and all connections are busy with client sessions. For an unlimited wait time, specify 0.

\n

Default: 120\n

\n

Constraints:

\n
    \n
  • \n

    Must be between 0 and 3600.

    \n
  • \n
" } }, "SessionPinningFilters": { @@ -3156,7 +3156,7 @@ "CopyTags": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. \n By default, tags are not copied.

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. \n By default, tags are not copied.

" } }, "Tags": { @@ -3365,7 +3365,7 @@ "CopyTags": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. \n By default, tags aren't copied.

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the source DB snapshot to the target DB snapshot. \n By default, tags aren't copied.

" } }, "PreSignedUrl": { @@ -3389,7 +3389,7 @@ "CopyOptionGroup": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy the DB option group associated with the source DB snapshot to the target \n Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with \n cross-account snapshot copy calls.

" + "smithy.api#documentation": "

Specifies whether to copy the DB option group associated with the source DB snapshot to the target \n Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with \n cross-account snapshot copy calls.

" } } }, @@ -4695,7 +4695,7 @@ "MasterUserPassword": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The password for the master user.
This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster.
Constraints:
  • Can't be specified if ManageMasterUserPassword is turned on.
  • Can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Length Constraints:
  • RDS for MariaDB - Must contain from 8 to 41 characters.
  • RDS for Microsoft SQL Server - Must contain from 8 to 128 characters.
  • RDS for MySQL - Must contain from 8 to 41 characters.
  • RDS for Oracle - Must contain from 8 to 30 characters.
  • RDS for PostgreSQL - Must contain from 8 to 128 characters.
" + "smithy.api#documentation": "
The password for the master user.
This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster.
Constraints:
  • Can't be specified if ManageMasterUserPassword is turned on.
  • Can include any printable ASCII character except \"/\", \"\"\", or \"@\". For RDS for Oracle, can't include the \"&\" (ampersand) or the \"'\" (single quotes) character.
Length Constraints:
  • RDS for MariaDB - Must contain from 8 to 41 characters.
  • RDS for Microsoft SQL Server - Must contain from 8 to 128 characters.
  • RDS for MySQL - Must contain from 8 to 41 characters.
  • RDS for Oracle - Must contain from 8 to 30 characters.
  • RDS for PostgreSQL - Must contain from 8 to 128 characters.
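As a purely client-side illustration of the constraints listed above (not part of the SDK), a small Swift check of a proposed master password; the engine identifiers are the common RDS engine names and the length table mirrors the list above:

```swift
/// Sketch: validate a master user password against the documented constraints.
func isValidMasterUserPassword(_ password: String, engine: String) -> Bool {
    let lengthRange: ClosedRange<Int>
    switch engine {
    case "mariadb", "mysql":                    lengthRange = 8...41
    case let e where e.hasPrefix("sqlserver"):  lengthRange = 8...128
    case let e where e.hasPrefix("oracle"):     lengthRange = 8...30
    case "postgres":                            lengthRange = 8...128
    default:                                    return false
    }
    guard lengthRange.contains(password.count) else { return false }
    var forbidden: Set<Character> = ["/", "\"", "@"]   // disallowed for every engine
    if engine.hasPrefix("oracle") {                    // Oracle-only rule added in this diff
        forbidden.insert("&")
        forbidden.insert("'")
    }
    return password.allSatisfy { ch in
        guard let ascii = ch.asciiValue, (32...126).contains(Int(ascii)) else { return false }
        return !forbidden.contains(ch)
    }
}
```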
" } }, "DBSecurityGroups": { @@ -5015,6 +5015,12 @@ "traits": { "smithy.api#documentation": "

The Oracle system identifier (SID), which is the name of the Oracle database instance that \n manages your database files. In this context, the term \"Oracle database instance\" refers exclusively \n to the system global area (SGA) and Oracle background processes. If you don't specify a SID, \n the value defaults to RDSCDB. The Oracle SID is also the name of your CDB.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.
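This hunk adds the new DedicatedLogVolume member to the model. Once the Swift service is regenerated from this model, the flag should surface as an optional Bool on the create/describe shapes; a hedged sketch, assuming the generated member is named dedicatedLogVolume and the other member names follow Soto's usual camel-casing:

```swift
import SotoRDS

// Sketch only: request a dedicated log volume (DLV) when creating an instance.
func createInstanceWithDedicatedLogVolume(rds: RDS) async throws {
    let request = RDS.CreateDBInstanceMessage(
        allocatedStorage: 100,
        dbInstanceClass: "db.m5.large",
        dbInstanceIdentifier: "example-instance",   // placeholder identifier
        dedicatedLogVolume: true,                   // assumed name for the member added in this diff
        engine: "postgres",
        manageMasterUserPassword: true,
        masterUsername: "admin"
    )
    _ = try await rds.createDBInstance(request)
}
```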

" + } } }, "traits": { @@ -5099,7 +5105,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.
Your source DB instance or cluster must have backup retention enabled.
", + "smithy.api#documentation": "
Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.
Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation.
All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.
Your source DB instance or cluster must have backup retention enabled.
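For Soto users, the operation documented above is exposed as createDBInstanceReadReplica on the generated RDS client. A hedged sketch (identifiers are placeholders, member names assume Soto's usual camel-casing, and the source instance must already have backup retention enabled):

```swift
import SotoRDS

// Sketch: create a Multi-AZ read replica of an existing MySQL DB instance.
func createReplica(rds: RDS) async throws {
    let request = RDS.CreateDBInstanceReadReplicaMessage(
        dbInstanceClass: "db.m5.large",               // optional; inherited from the source if omitted
        dbInstanceIdentifier: "example-replica",      // placeholder replica name
        multiAZ: true,
        sourceDBInstanceIdentifier: "example-source"  // placeholder source instance
    )
    let result = try await rds.createDBInstanceReadReplica(request)
    print(result.dbInstance?.dbInstanceStatus ?? "unknown")
}
```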
", "smithy.api#examples": [ { "title": "To create a DB instance read replica", @@ -5140,7 +5146,7 @@ "DBInstanceClass": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Default: Inherits from the source DB instance.
" + "smithy.api#documentation": "
The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.
Default: Inherits the value from the source DB instance.
" } }, "AvailabilityZone": { @@ -5152,43 +5158,43 @@ "Port": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: 1150-65535
" + "smithy.api#documentation": "
The port number that the DB instance uses for connections.
Valid Values: 1150-65535
Default: Inherits the value from the source DB instance.
" } }, "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the read replica is in a Multi-AZ deployment.
You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your read replica as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ DB instance or a Multi-AZ DB cluster.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
Specifies whether the read replica is in a Multi-AZ deployment.
You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your read replica as a Multi-AZ DB instance is independent of whether the source is a Multi-AZ DB instance or a Multi-AZ DB cluster.
This setting doesn't apply to RDS Custom DB instances.
" } }, "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether minor engine upgrades are applied automatically to the read replica during the maintenance window.
This setting doesn't apply to RDS Custom.
Default: Inherits from the source DB instance
" + "smithy.api#documentation": "
Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window.
This setting doesn't apply to RDS Custom DB instances.
Default: Inherits the value from the source DB instance.
" } }, "Iops": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.
" + "smithy.api#documentation": "
The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance.
" } }, "OptionGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The option group the DB instance is associated with. If omitted, the option group associated with the source instance or cluster is used.
For SQL Server, you must use the option group associated with the source.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The option group to associate the DB instance with. If not specified, RDS uses the option group associated with the source DB instance or cluster.
For SQL Server, you must use the option group associated with the source.
This setting doesn't apply to RDS Custom DB instances.
" } }, "DBParameterGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The name of the DB parameter group to associate with this DB instance.
If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. It isn't supported for RDS Custom.
Constraints:
  • Must be 1 to 255 letters, numbers, or hyphens.
  • First character must be a letter
  • Can't end with a hyphen or contain two consecutive hyphens
" + "smithy.api#documentation": "
The name of the DB parameter group to associate with this DB instance.
If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.
Constraints:
  • Must be 1 to 255 letters, numbers, or hyphens.
  • First character must be a letter.
  • Can't end with a hyphen or contain two consecutive hyphens.
" } }, "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance is publicly accessible.
When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.
For more information, see CreateDBInstance.
" + "smithy.api#documentation": "
Specifies whether the DB instance is publicly accessible.
When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.
When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.
For more information, see CreateDBInstance.
" } }, "Tags": { @@ -5197,37 +5203,37 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.
Constraints:
  • If supplied, must match the name of an existing DBSubnetGroup.
  • The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
  • All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either:>
      • Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC.
      • Not specify a DB subnet group. All these read replicas are created outside of any VPC.
Example: mydbsubnetgroup
" + "smithy.api#documentation": "
A DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.
Constraints:
  • If supplied, must match the name of an existing DB subnet group.
  • The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
  • All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC.
      • Not specify a DB subnet group. All these read replicas are created outside of any VPC.
Example: mydbsubnetgroup
" } }, "VpcSecurityGroupIds": { "target": "com.amazonaws.rds#VpcSecurityGroupIdList", "traits": { - "smithy.api#documentation": "

A list of Amazon EC2 VPC security groups to associate with the read replica.
This setting doesn't apply to RDS Custom.
Default: The default EC2 VPC security group for the DB subnet group's VPC.
" + "smithy.api#documentation": "
A list of Amazon EC2 VPC security groups to associate with the read replica.
This setting doesn't apply to RDS Custom DB instances.
Default: The default EC2 VPC security group for the DB subnet group's VPC.
" } }, "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the read replica.
Valid values: gp2 | gp3 | io1 | standard
If you specify io1 or gp3, you must also include a value for the Iops parameter.
Default: io1 if the Iops parameter is specified, otherwise gp2
" + "smithy.api#documentation": "
The storage type to associate with the read replica.
If you specify io1 or gp3, you must also include a value for the Iops parameter.
Valid Values: gp2 | gp3 | io1 | standard
Default: io1 if the Iops parameter is specified. Otherwise, gp2.
" } }, "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the read replica to snapshots of the read replica. By default, tags are not copied.
" + "smithy.api#documentation": "
Specifies whether to copy all tags from the read replica to snapshots of the read replica. By default, tags aren't copied.
" } }, "MonitoringInterval": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.
This setting doesn't apply to RDS Custom.
Valid Values: 0, 1, 5, 10, 15, 30, 60
" + "smithy.api#documentation": "
The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.
If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0.
This setting doesn't apply to RDS Custom DB instances.
Valid Values: 0, 1, 5, 10, 15, 30, 60
Default: 0
" } }, "MonitoringRoleArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.
If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.
This setting doesn't apply to RDS Custom DB instances.
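The two monitoring settings above are coupled: any non-zero MonitoringInterval must be accompanied by a MonitoringRoleArn. A hedged Soto sketch of enabling Enhanced Monitoring on a read replica at creation time (the ARN and identifiers are placeholders):

```swift
import SotoRDS

// Sketch: create a read replica with Enhanced Monitoring metrics collected every 30 seconds.
func createMonitoredReplica(rds: RDS) async throws {
    let request = RDS.CreateDBInstanceReadReplicaMessage(
        dbInstanceIdentifier: "example-replica",
        monitoringInterval: 30,                                        // non-zero, so a role ARN is required
        monitoringRoleArn: "arn:aws:iam::123456789012:role/emaccess",  // placeholder monitoring role
        sourceDBInstanceIdentifier: "example-source"
    )
    _ = try await rds.createDBInstanceReadReplica(request)
}
```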

" } }, "KmsKeyId": { @@ -5239,67 +5245,67 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions.
You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:
  • DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL.
    For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
  • KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
  • SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions.
You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:
  • DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL.
    For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region.
  • KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.
  • SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.
SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom DB instances.
" } }, "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.
For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" } }, "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable Performance Insights for the read replica.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
Specifies whether to enable Performance Insights for the read replica.
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" } }, "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The Amazon Web Services KMS key identifier for encryption of Performance Insights data.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
This setting doesn't apply to RDS Custom DB instances.
" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
  • 7
  • month * 31, where month is a number of months from 1-23
  • 731
For example, the following values are valid:
  • 93 (3 months * 31)
  • 341 (11 months * 31)
  • 589 (19 months * 31)
  • 731
If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The number of days to retain Performance Insights data.
This setting doesn't apply to RDS Custom DB instances.
Valid Values:
  • 7
  • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)
  • 731
Default: 7 days
If you specify a retention period that isn't valid, such as 94, Amazon RDS returns an error.
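The valid retention values form a small closed set, so a client-side check is easy to express. A purely illustrative Swift sketch that mirrors the rule above:

```swift
/// Sketch: returns true if `days` is a valid Performance Insights retention period:
/// 7, a multiple of 31 for 1-23 months, or 731.
func isValidPerformanceInsightsRetention(_ days: Int) -> Bool {
    if days == 7 || days == 731 { return true }
    return days % 31 == 0 && (1...23).contains(days / 31)
}

// Examples from the doc text: 93, 341, and 589 are valid; 94 is rejected.
assert(isValidPerformanceInsightsRetention(93))
assert(!isValidPerformanceInsightsRetention(94))
```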

" } }, "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" } }, "ProcessorFeatures": { "target": "com.amazonaws.rds#ProcessorFeatureList", "traits": { - "smithy.api#documentation": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
This setting doesn't apply to RDS Custom DB instances.
" } }, "UseDefaultProcessorFeatures": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance class of the DB instance uses its default processor features.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
Specifies whether the DB instance class of the DB instance uses its default processor features.
This setting doesn't apply to RDS Custom DB instances.
" } }, "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.
" + "smithy.api#documentation": "
Specifies whether to enable deletion protection for the DB instance. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.
" } }, "Domain": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
For more information, see Kerberos Authentication in the Amazon RDS User Guide.
This setting doesn't apply to RDS Custom DB instances.
" } }, "DomainIAMRoleName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "
The name of the IAM role to use when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom DB instances.
" } }, "DomainFqdn": { @@ -5341,25 +5347,25 @@ "CustomIamInstanceProfile": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:
  • The profile must exist in your account.
  • The profile must have an IAM role that Amazon EC2 has permissions to assume.
  • The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom.
For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.
This setting is required for RDS Custom.
" + "smithy.api#documentation": "
The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:
  • The profile must exist in your account.
  • The profile must have an IAM role that Amazon EC2 has permissions to assume.
  • The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom.
For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.
This setting is required for RDS Custom DB instances.
" } }, "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB instance.
Valid values:
  • IPV4
  • DUAL
The network type is determined by the DBSubnetGroup specified for read replica. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).
For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.
" + "smithy.api#documentation": "
The network type of the DB instance.
Valid Values:
  • IPV4
  • DUAL
The network type is determined by the DBSubnetGroup specified for read replica. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).
For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.
" } }, "StorageThroughput": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies the storage throughput value for the read replica.
This setting doesn't apply to RDS Custom or Amazon Aurora.
" + "smithy.api#documentation": "
Specifies the storage throughput value for the read replica.
This setting doesn't apply to RDS Custom or Amazon Aurora DB instances.
" } }, "EnableCustomerOwnedIp": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts read replica.
A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the read replica from outside of its virtual private cloud (VPC) on your local network.
For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.
For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "
Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts read replica.
A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the read replica from outside of its virtual private cloud (VPC) on your local network.
For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.
For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.
" } }, "AllocatedStorage": { @@ -5373,6 +5379,12 @@ "traits": { "smithy.api#documentation": "

The identifier of the Multi-AZ DB cluster that will act as the source for the read replica. Each DB cluster can have up to 15 read replicas.
Constraints:
  • Must be the identifier of an existing Multi-AZ DB cluster.
  • Can't be specified if the SourceDBInstanceIdentifier parameter is also specified.
  • The specified DB cluster must have automatic backups enabled, that is, its backup retention period must be greater than 0.
  • The source DB cluster must be in the same Amazon Web Services Region as the read replica. Cross-Region replication isn't supported.
" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -5572,7 +5584,7 @@ "TargetRole": { "target": "com.amazonaws.rds#DBProxyEndpointTargetRole", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations. The default is READ_WRITE. The only role that proxies for RDS for Microsoft SQL Server support is READ_WRITE.
" + "smithy.api#documentation": "
The role of the DB proxy endpoint. The role determines whether the endpoint can be used for read/write or only read operations. The default is READ_WRITE. The only role that proxies for RDS for Microsoft SQL Server support is READ_WRITE.
" } }, "Tags": { @@ -5645,7 +5657,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy.
" + "smithy.api#documentation": "
Specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy.
" } }, "IdleClientTimeout": { @@ -5658,7 +5670,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.
" + "smithy.api#documentation": "
Specifies whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.
" } }, "Tags": { @@ -6071,7 +6083,7 @@ "SourceType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.
Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy
" + "smithy.api#documentation": "
The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.
Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy
" } }, "EventCategories": { @@ -6089,7 +6101,7 @@ "Enabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.
" + "smithy.api#documentation": "
Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.
" } }, "Tags": { @@ -6791,7 +6803,7 @@ "GlobalWriteForwardingRequested": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether write forwarding is enabled for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster.
" + "smithy.api#documentation": "
Indicates whether write forwarding is enabled for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster.
" } }, "PendingModifiedValues": { @@ -6891,7 +6903,7 @@ "LocalWriteForwardingStatus": { "target": "com.amazonaws.rds#LocalWriteForwardingStatus", "traits": { - "smithy.api#documentation": "

Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it.
" + "smithy.api#documentation": "
Indicates whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it.
" } }, "AwsBackupRecoveryPointArn": { @@ -6986,7 +6998,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.
" + "smithy.api#documentation": "
Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.
" } }, "ClusterCreateTime": { @@ -6999,7 +7011,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the source DB cluster is encrypted.
" + "smithy.api#documentation": "
Indicates whether the source DB cluster is encrypted.
" } }, "AllocatedStorage": { @@ -7444,7 +7456,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.
" + "smithy.api#documentation": "
Indicates whether the cluster member is the primary DB instance for the DB cluster.
" } }, "DBClusterParameterGroupStatus": { @@ -7762,107 +7774,107 @@ "AvailabilityZones": { "target": "com.amazonaws.rds#AvailabilityZones", "traits": { - "smithy.api#documentation": "

Provides the list of Availability Zones (AZs) where instances in the DB cluster snapshot can be restored.
" + "smithy.api#documentation": "
The list of Availability Zones (AZs) where instances in the DB cluster snapshot can be restored.
" } }, "DBClusterSnapshotIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the identifier for the DB cluster snapshot.
" + "smithy.api#documentation": "
The identifier for the DB cluster snapshot.
" } }, "DBClusterIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.
" + "smithy.api#documentation": "
The DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.
" } }, "SnapshotCreateTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).
" + "smithy.api#documentation": "
The time when the snapshot was taken, in Universal Coordinated Time (UTC).
" } }, "Engine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the name of the database engine for this DB cluster snapshot.
" + "smithy.api#documentation": "
The name of the database engine for this DB cluster snapshot.
" } }, "EngineMode": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the engine mode of the database engine for this DB cluster snapshot.
" + "smithy.api#documentation": "
The engine mode of the database engine for this DB cluster snapshot.
" } }, "AllocatedStorage": { "target": "com.amazonaws.rds#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Specifies the allocated storage size in gibibytes (GiB).
" + "smithy.api#documentation": "
The allocated storage size of the DB cluster snapshot in gibibytes (GiB).
" } }, "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the status of this DB cluster snapshot. Valid statuses are the following:
  • available
  • copying
  • creating
" + "smithy.api#documentation": "
The status of this DB cluster snapshot. Valid statuses are the following:
  • available
  • copying
  • creating
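A hedged Soto sketch that inspects the statuses listed above for a cluster's snapshots (method and member names assume the standard generated client; adjust to the actual generated code):

```swift
import SotoRDS

// Sketch: list cluster snapshots and report which ones are still copying or creating.
func reportPendingSnapshots(rds: RDS, clusterID: String) async throws {
    let response = try await rds.describeDBClusterSnapshots(
        .init(dbClusterIdentifier: clusterID)
    )
    for snapshot in response.dbClusterSnapshots ?? [] where snapshot.status != "available" {
        print("\(snapshot.dbClusterSnapshotIdentifier ?? "?") is \(snapshot.status ?? "unknown")")
    }
}
```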
" } }, "Port": { "target": "com.amazonaws.rds#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Specifies the port that the DB cluster was listening on at the time of the snapshot.
" + "smithy.api#documentation": "
The port that the DB cluster was listening on at the time of the snapshot.
" } }, "VpcId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the VPC ID associated with the DB cluster snapshot.
" + "smithy.api#documentation": "
The VPC ID associated with the DB cluster snapshot.
" } }, "ClusterCreateTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).
" + "smithy.api#documentation": "
The time when the DB cluster was created, in Universal Coordinated Time (UTC).
" } }, "MasterUsername": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the master username for this DB cluster snapshot.
" + "smithy.api#documentation": "
The master username for this DB cluster snapshot.
" } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the version of the database engine for this DB cluster snapshot.
" + "smithy.api#documentation": "
The version of the database engine for this DB cluster snapshot.
" } }, "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the license model information for this DB cluster snapshot.
" + "smithy.api#documentation": "
The license model information for this DB cluster snapshot.
" } }, "SnapshotType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the type of the DB cluster snapshot.
" + "smithy.api#documentation": "
The type of the DB cluster snapshot.
" } }, "PercentProgress": { "target": "com.amazonaws.rds#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Specifies the percentage of the estimated data that has been transferred.
" + "smithy.api#documentation": "
The percentage of the estimated data that has been transferred.
" } }, "StorageEncrypted": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the DB cluster snapshot is encrypted.
" + "smithy.api#documentation": "
Indicates whether the DB cluster snapshot is encrypted.
" } }, "KmsKeyId": { @@ -7874,7 +7886,7 @@ "DBClusterSnapshotArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) for the DB cluster snapshot.
" + "smithy.api#documentation": "
The Amazon Resource Name (ARN) for the DB cluster snapshot.
" } }, "SourceDBClusterSnapshotArn": { @@ -7887,7 +7899,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.
" + "smithy.api#documentation": "
Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.
" } }, "TagList": { @@ -7908,7 +7920,7 @@ "DbClusterResourceId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the resource ID of the DB cluster that this DB cluster snapshot was created from.
" + "smithy.api#documentation": "
The resource ID of the DB cluster that this DB cluster snapshot was created from.
" } } }, @@ -8114,7 +8126,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
" + "smithy.api#documentation": "
Indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
" } }, "SupportsReadReplica": { @@ -8146,14 +8158,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether you can use Aurora parallel query with a specific DB engine version.
" + "smithy.api#documentation": "
Indicates whether you can use Aurora parallel query with a specific DB engine version.
" } }, "SupportsGlobalDatabases": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether you can use Aurora global databases with a specific DB engine version.
" + "smithy.api#documentation": "
Indicates whether you can use Aurora global databases with a specific DB engine version.
" } }, "MajorEngineVersion": { @@ -8199,7 +8211,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether the engine version supports Babelfish for Aurora PostgreSQL.
" + "smithy.api#documentation": "
Indicates whether the engine version supports Babelfish for Aurora PostgreSQL.
" } }, "CustomDBEngineVersionManifest": { @@ -8211,7 +8223,7 @@ "SupportsCertificateRotationWithoutRestart": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the engine version supports rotating the server certificate without rebooting the DB instance.
" + "smithy.api#documentation": "
Indicates whether the engine version supports rotating the server certificate without rebooting the DB instance.
" } }, "SupportedCACertificateIdentifiers": { @@ -8223,7 +8235,7 @@ "SupportsLocalWriteForwarding": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" + "smithy.api#documentation": "
Indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" } } }, @@ -8762,6 +8774,13 @@ "traits": { "smithy.api#documentation": "

The progress of the storage optimization operation as a percentage.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -8815,20 +8834,20 @@ "RestoreWindow": { "target": "com.amazonaws.rds#RestoreWindow", "traits": { - "smithy.api#documentation": "

Earliest and latest time an instance can be restored to.
" + "smithy.api#documentation": "
The earliest and latest time a DB instance can be restored to.
" } }, "AllocatedStorage": { "target": "com.amazonaws.rds#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Specifies the allocated storage size in gibibytes (GiB).
" + "smithy.api#documentation": "
The allocated storage size for the the automated backup in gibibytes (GiB).
" } }, "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides a list of status information for an automated backup:
  • active - Automated backups for current instances.
  • retained - Automated backups for deleted instances.
  • creating - Automated backups that are waiting for the first automated snapshot to be available.
" + "smithy.api#documentation": "
A list of status information for an automated backup:
  • active - Automated backups for current instances.
  • retained - Automated backups for deleted instances.
  • creating - Automated backups that are waiting for the first automated snapshot to be available.
" } }, "Port": { @@ -8847,13 +8866,13 @@ "VpcId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Provides the VPC ID associated with the DB instance.
" + "smithy.api#documentation": "
The VPC ID associated with the DB instance.
" } }, "InstanceCreateTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

Provides the date and time that the DB instance was created.
" + "smithy.api#documentation": "
The date and time when the DB instance was created.
" } }, "MasterUsername": { @@ -8877,7 +8896,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

License model information for the automated backup.
" + "smithy.api#documentation": "
The license model information for the automated backup.
" } }, "Iops": { @@ -8902,13 +8921,13 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the automated backup is encrypted.
" + "smithy.api#documentation": "
Indicates whether the automated backup is encrypted.
" } }, "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type associated with the automated backup.
" + "smithy.api#documentation": "
The storage type associated with the automated backup.
" } }, "KmsKeyId": { @@ -8951,13 +8970,13 @@ "BackupTarget": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies where automated backups are stored: Amazon Web Services Outposts or the Amazon Web Services Region.
" + "smithy.api#documentation": "
The location where automated backups are stored: Amazon Web Services Outposts or the Amazon Web Services Region.
" } }, "StorageThroughput": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies the storage throughput for the automated backup.

" + "smithy.api#documentation": "

The storage throughput for the automated backup.

" } }, "AwsBackupRecoveryPointArn": { @@ -8965,6 +8984,12 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -9123,12 +9148,12 @@ "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Describes the state of association between the IAM role and the DB instance. The Status property returns one of the following\n values:

\n
    \n
  • \n

    \n ACTIVE - the IAM role ARN is associated with the DB instance and can be used to\n access other Amazon Web Services services on your behalf.

    \n
  • \n
  • \n

    \n PENDING - the IAM role ARN is being associated with the DB instance.

    \n
  • \n
  • \n

    \n INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable\n to assume the IAM role in order to access other Amazon Web Services services on your behalf.

    \n
  • \n
" + "smithy.api#documentation": "

Information about the state of association between the IAM role and the DB instance. The Status property returns one of the following\n values:

\n
    \n
  • \n

    \n ACTIVE - the IAM role ARN is associated with the DB instance and can be used to\n access other Amazon Web Services services on your behalf.

    \n
  • \n
  • \n

    \n PENDING - the IAM role ARN is being associated with the DB instance.

    \n
  • \n
  • \n

    \n INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable\n to assume the IAM role in order to access other Amazon Web Services services on your behalf.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

Describes an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB instance.

" + "smithy.api#documentation": "

Information about an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB instance.

" } }, "com.amazonaws.rds#DBInstanceRoleAlreadyExistsFault": { @@ -9204,13 +9229,13 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

" + "smithy.api#documentation": "

A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

" } }, "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Status of the DB instance. For a StatusType of read replica, the values can be\n replicating, replication stop point set, replication stop point reached, error, stopped,\n or terminated.

" + "smithy.api#documentation": "

The status of the DB instance. For a StatusType of read replica, the values can be\n replicating, replication stop point set, replication stop point reached, error, stopped,\n or terminated.

" } }, "Message": { @@ -9507,7 +9532,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether the proxy includes detailed information about SQL statements in its logs.\n This information helps you to debug issues involving SQL behavior or the performance\n and scalability of the proxy connections. The debug information includes the text of\n SQL statements that you submit through the proxy. Thus, only enable this setting\n when needed for debugging, and only when you have security measures in place to\n safeguard any sensitive information that appears in the logs.

" + "smithy.api#documentation": "

Indicates whether the proxy includes detailed information about SQL statements in its logs.\n This information helps you to debug issues involving SQL behavior or the performance\n and scalability of the proxy connections. The debug information includes the text of\n SQL statements that you submit through the proxy. Thus, only enable this setting\n when needed for debugging, and only when you have security measures in place to\n safeguard any sensitive information that appears in the logs.

" } }, "CreatedDate": { @@ -9611,7 +9636,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether this endpoint is the default endpoint for the associated DB proxy.\n Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the\n DB proxy can be either read/write or read-only.

" + "smithy.api#documentation": "

Indicates whether this endpoint is the default endpoint for the associated DB proxy.\n Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the\n DB proxy can be either read/write or read-only.

" } } }, @@ -9952,7 +9977,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether this target group is the first one used for connection requests by the associated proxy.\n Because each proxy is currently associated with a single target group, currently this setting\n is always true.

" + "smithy.api#documentation": "

Indicates whether this target group is the first one used for connection requests by the associated proxy.\n Because each proxy is currently associated with a single target group, currently this setting\n is always true.

" } }, "Status": { @@ -10340,7 +10365,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the DB snapshot is encrypted.

" + "smithy.api#documentation": "

Indicates whether the DB snapshot is encrypted.

" } }, "KmsKeyId": { @@ -10365,7 +10390,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

" + "smithy.api#documentation": "

Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

" } }, "ProcessorFeatures": { @@ -10412,6 +10437,13 @@ "traits": { "smithy.api#documentation": "

The Oracle system identifier (SID), which is the name of the Oracle database instance that \n manages your database files. The Oracle SID is also the name of your CDB.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -11015,7 +11047,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted.\n If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot \n is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. \n By default, this parameter is disabled.

\n \n

You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled.

\n
" + "smithy.api#documentation": "

Specifies whether to skip the creation of a final DB cluster snapshot before the DB cluster is deleted.\n If skip is specified, no DB cluster snapshot is created. If skip isn't specified, a DB cluster snapshot \n is created before the DB cluster is deleted. By default, skip isn't specified, and the DB cluster snapshot is created. \n By default, this parameter is disabled.

\n \n

You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is disabled.

\n
" } }, "FinalDBSnapshotIdentifier": { @@ -11027,7 +11059,7 @@ "DeleteAutomatedBackups": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.

" + "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB\n cluster is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB cluster is deleted.

" } } }, @@ -11204,7 +11236,7 @@ } ], "traits": { - "smithy.api#documentation": "

The DeleteDBInstance action deletes a previously provisioned DB instance. \n When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. \n Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

\n

If you request a final DB snapshot \n the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance\n is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

\n

When a DB instance is in a failure state and has a status of failed, incompatible-restore, \n or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

\n

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following\n conditions are true:

\n
    \n
  • \n

    The DB cluster is a read replica of another Amazon Aurora DB cluster.

    \n
  • \n
  • \n

    The DB instance is the only instance in the DB cluster.

    \n
  • \n
\n

To delete a DB instance in this case, first call the\n PromoteReadReplicaDBCluster API action to promote the DB cluster so\n it's no longer a read replica. After the promotion completes, then call the\n DeleteDBInstance API action to delete the final instance in the DB\n cluster.

", + "smithy.api#documentation": "

Deletes a previously provisioned DB instance. \n When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. \n However, manual DB snapshots of the DB instance aren't deleted.

\n

If you request a final DB snapshot, the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. \n This operation can't be canceled or reverted after it begins. To monitor the status of this operation, use DescribeDBInstances.

\n

When a DB instance is in a failure state and has a status of failed, incompatible-restore, \n or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

\n

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following\n conditions are true:

\n
    \n
  • \n

    The DB cluster is a read replica of another Amazon Aurora DB cluster.

    \n
  • \n
  • \n

    The DB instance is the only instance in the DB cluster.

    \n
  • \n
\n

To delete a DB instance in this case, first use the PromoteReadReplicaDBCluster operation to promote the DB cluster so that it's no longer a read replica. \n After the promotion completes, use the DeleteDBInstance operation to delete the final instance in the DB cluster.

\n \n

For RDS Custom DB instances, deleting the DB instance permanently deletes the EC2 instance and the associated EBS volumes. Make sure that you don't terminate or delete \n these resources before you delete the DB instance. Otherwise, deleting the DB instance and creation of the final snapshot might fail.

\n
", "smithy.api#examples": [ { "title": "To delete a DB instance", @@ -11324,7 +11356,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to skip the creation of a final DB snapshot before deleting the instance.\n If you enable this parameter, RDS doesn't create a DB snapshot. If you don't enable this parameter, \n RDS creates a DB snapshot before the DB instance is deleted. By default, skip isn't enabled, \n and the DB snapshot is created.

\n \n

If you don't enable this parameter, you must specify the FinalDBSnapshotIdentifier parameter.

\n
\n

When a DB instance is in a failure state and has a status of failed, incompatible-restore, \n or incompatible-network, RDS can delete the instance only if you enable this parameter.

\n

If you delete a read replica or an RDS Custom instance, you must enable this setting.

\n

This setting is required for RDS Custom.

" + "smithy.api#documentation": "

Specifies whether to skip the creation of a final DB snapshot before deleting the instance.\n If you enable this parameter, RDS doesn't create a DB snapshot. If you don't enable this parameter, \n RDS creates a DB snapshot before the DB instance is deleted. By default, skip isn't enabled, \n and the DB snapshot is created.

\n \n

If you don't enable this parameter, you must specify the FinalDBSnapshotIdentifier parameter.

\n
\n

When a DB instance is in a failure state and has a status of failed, incompatible-restore, \n or incompatible-network, RDS can delete the instance only if you enable this parameter.

\n

If you delete a read replica or an RDS Custom instance, you must enable this setting.

\n

This setting is required for RDS Custom.

" } }, "FinalDBSnapshotIdentifier": { @@ -11336,7 +11368,7 @@ "DeleteAutomatedBackups": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to remove automated backups immediately after the DB\n instance is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB instance is deleted.

" + "smithy.api#documentation": "

Specifies whether to remove automated backups immediately after the DB\n instance is deleted. This parameter isn't case-sensitive. The default is to remove \n automated backups immediately after the DB instance is deleted.

" } } }, @@ -12589,7 +12621,7 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A value that indicates to return only parameters for a specific source. \n Parameter sources can be engine, service,\n or customer.

" + "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n customer\n

    \n
  • \n
  • \n

    \n engine\n

    \n
  • \n
  • \n

    \n service\n

    \n
  • \n
" } }, "Filters": { @@ -12939,14 +12971,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to include shared manual DB cluster snapshots \n from other Amazon Web Services accounts that this Amazon Web Services account has been given \n permission to copy or restore. By default, these snapshots are not included.

\n

You can give an Amazon Web Services account permission to restore a manual DB cluster snapshot from\n another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action.

" + "smithy.api#documentation": "

Specifies whether to include shared manual DB cluster snapshots \n from other Amazon Web Services accounts that this Amazon Web Services account has been given \n permission to copy or restore. By default, these snapshots are not included.

\n

You can give an Amazon Web Services account permission to restore a manual DB cluster snapshot from\n another Amazon Web Services account by the ModifyDBClusterSnapshotAttribute API action.

" } }, "IncludePublic": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to include manual DB cluster snapshots that are public and can be copied \n or restored by any Amazon Web Services account. By default, the public snapshots are not included.

\n

You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

" + "smithy.api#documentation": "

Specifies whether to include manual DB cluster snapshots that are public and can be copied \n or restored by any Amazon Web Services account. By default, the public snapshots are not included.

\n

You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

" } }, "DbClusterResourceId": { @@ -13255,7 +13287,7 @@ "target": "com.amazonaws.rds#DBEngineVersionMessage" }, "traits": { - "smithy.api#documentation": "

Returns a list of the available DB engines.

", + "smithy.api#documentation": "

Describes the properties of specific versions of DB engines.

", "smithy.api#examples": [ { "title": "To describe the DB engine versions for the MySQL DB engine", @@ -13306,19 +13338,19 @@ "Engine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The database engine to return.

\n

Valid Values:

\n
    \n
  • \n

    \n aurora-mysql\n

    \n
  • \n
  • \n

    \n aurora-postgresql\n

    \n
  • \n
  • \n

    \n custom-oracle-ee\n

    \n
  • \n
  • \n

    \n mariadb\n

    \n
  • \n
  • \n

    \n mysql\n

    \n
  • \n
  • \n

    \n oracle-ee\n

    \n
  • \n
  • \n

    \n oracle-ee-cdb\n

    \n
  • \n
  • \n

    \n oracle-se2\n

    \n
  • \n
  • \n

    \n oracle-se2-cdb\n

    \n
  • \n
  • \n

    \n postgres\n

    \n
  • \n
  • \n

    \n sqlserver-ee\n

    \n
  • \n
  • \n

    \n sqlserver-se\n

    \n
  • \n
  • \n

    \n sqlserver-ex\n

    \n
  • \n
  • \n

    \n sqlserver-web\n

    \n
  • \n
" + "smithy.api#documentation": "

The database engine to return version details for.

\n

Valid Values:

\n
    \n
  • \n

    \n aurora-mysql\n

    \n
  • \n
  • \n

    \n aurora-postgresql\n

    \n
  • \n
  • \n

    \n custom-oracle-ee\n

    \n
  • \n
  • \n

    \n mariadb\n

    \n
  • \n
  • \n

    \n mysql\n

    \n
  • \n
  • \n

    \n oracle-ee\n

    \n
  • \n
  • \n

    \n oracle-ee-cdb\n

    \n
  • \n
  • \n

    \n oracle-se2\n

    \n
  • \n
  • \n

    \n oracle-se2-cdb\n

    \n
  • \n
  • \n

    \n postgres\n

    \n
  • \n
  • \n

    \n sqlserver-ee\n

    \n
  • \n
  • \n

    \n sqlserver-se\n

    \n
  • \n
  • \n

    \n sqlserver-ex\n

    \n
  • \n
  • \n

    \n sqlserver-web\n

    \n
  • \n
" } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The database engine version to return.

\n

Example: 5.1.49\n

" + "smithy.api#documentation": "

A specific database engine version to return details for.

\n

Example: 5.1.49\n

" } }, "DBParameterGroupFamily": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The name of a specific DB parameter group family to return details for.

\n

Constraints:

\n
    \n
  • \n

    If supplied, must match an existing DBParameterGroupFamily.

    \n
  • \n
" + "smithy.api#documentation": "

The name of a specific DB parameter group family to return details for.

\n

Constraints:

\n
    \n
  • \n

    If supplied, must match an existing DB parameter group family.

    \n
  • \n
" } }, "Filters": { @@ -13343,25 +13375,25 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether only the default version of the specified engine or engine and major version combination is returned.

" + "smithy.api#documentation": "

Specifies whether to return only the default version of the specified engine or the engine and major version combination.

" } }, "ListSupportedCharacterSets": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to list the supported character sets for each engine version.

\n

If this parameter is enabled and the requested engine supports the CharacterSetName parameter for\n CreateDBInstance, the response includes a list of supported character sets for each engine\n version.

\n

For RDS Custom, the default is not to list supported character sets. If you set ListSupportedCharacterSets\n to true, RDS Custom returns no results.

" + "smithy.api#documentation": "

Specifies whether to list the supported character sets for each engine version.

\n

If this parameter is enabled and the requested engine supports the CharacterSetName parameter for\n CreateDBInstance, the response includes a list of supported character sets for each engine\n version.

\n

For RDS Custom, the default is not to list supported character sets. If you enable this parameter, RDS Custom returns no results.

" } }, "ListSupportedTimezones": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to list the supported time zones for each engine version.

\n

If this parameter is enabled and the requested engine supports the TimeZone parameter for CreateDBInstance, \n the response includes a list of supported time zones for each engine version.

\n

For RDS Custom, the default is not to list supported time zones. If you set ListSupportedTimezones\n to true, RDS Custom returns no results.

" + "smithy.api#documentation": "

Specifies whether to list the supported time zones for each engine version.

\n

If this parameter is enabled and the requested engine supports the TimeZone parameter for CreateDBInstance, \n the response includes a list of supported time zones for each engine version.

\n

For RDS Custom, the default is not to list supported time zones. If you enable this parameter, RDS Custom returns no results.

" } }, "IncludeAll": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to include engine versions that aren't available in the list. The default is to list only available engine versions.

" + "smithy.api#documentation": "

Specifies whether to also list the engine versions that aren't available. The default is to list only available engine versions.

" } } }, @@ -14754,14 +14786,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to include shared manual DB cluster snapshots \n from other Amazon Web Services accounts that this Amazon Web Services account has been given \n permission to copy or restore. By default, these snapshots are not included.

\n

You can give an Amazon Web Services account permission to restore a manual DB snapshot from\n another Amazon Web Services account by using the ModifyDBSnapshotAttribute API action.

\n

This setting doesn't apply to RDS Custom.

" + "smithy.api#documentation": "

Specifies whether to include shared manual DB cluster snapshots \n from other Amazon Web Services accounts that this Amazon Web Services account has been given \n permission to copy or restore. By default, these snapshots are not included.

\n

You can give an Amazon Web Services account permission to restore a manual DB snapshot from\n another Amazon Web Services account by using the ModifyDBSnapshotAttribute API action.

\n

This setting doesn't apply to RDS Custom.

" } }, "IncludePublic": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to include manual DB cluster snapshots that are public and can be copied \n or restored by any Amazon Web Services account. By default, the public snapshots are not included.

\n

You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API.

\n

This setting doesn't apply to RDS Custom.

" + "smithy.api#documentation": "

Specifies whether to include manual DB cluster snapshots that are public and can be copied \n or restored by any Amazon Web Services account. By default, the public snapshots are not included.

\n

You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute API.

\n

This setting doesn't apply to RDS Custom.

" } }, "DbiResourceId": { @@ -15136,7 +15168,7 @@ "SourceType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of source that is generating the events. For RDS Proxy events, specify db-proxy.

\n

Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy\n

" + "smithy.api#documentation": "

The type of source that is generating the events. For RDS Proxy events, specify db-proxy.

\n

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy\n

" } }, "Filters": { @@ -15819,7 +15851,7 @@ "Vpc": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to show only VPC or non-VPC offerings. RDS Custom supports \n only VPC offerings.

\n

RDS Custom supports only VPC offerings. If you describe non-VPC offerings for RDS Custom, the output \n shows VPC offerings.

" + "smithy.api#documentation": "

Specifies whether to show only VPC or non-VPC offerings.

\n

RDS Custom supports only VPC offerings. If you describe non-VPC offerings for RDS Custom, the output \n shows VPC offerings.

" } }, "Filters": { @@ -16019,7 +16051,7 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to show only those reservations that support Multi-AZ.

" + "smithy.api#documentation": "

Specifies whether to show only those reservations that support Multi-AZ.

" } }, "LeaseId": { @@ -16141,7 +16173,7 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to show only those reservations that support Multi-AZ.

" + "smithy.api#documentation": "

Specifies whether to show only those reservations that support Multi-AZ.

" } }, "Filters": { @@ -16616,7 +16648,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean value that if true, indicates there is more data to be downloaded.

" + "smithy.api#documentation": "

A Boolean value that, if true, indicates there is more data to be downloaded.

" } } }, @@ -17081,31 +17113,31 @@ "ExportOnly": { "target": "com.amazonaws.rds#StringList", "traits": { - "smithy.api#documentation": "

The data exported from the snapshot or cluster. Valid values are the following:

\n
    \n
  • \n

    \n database - Export all the data from a specified database.

    \n
  • \n
  • \n

    \n database.table\n table-name - \n Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

    \n
  • \n
  • \n

    \n database.schema\n schema-name - Export a database schema of the snapshot or cluster. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
  • \n

    \n database.schema.table\n table-name - Export a table of the database schema. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
" + "smithy.api#documentation": "

The data exported from the snapshot or cluster.

\n

Valid Values:

\n
    \n
  • \n

    \n database - Export all the data from a specified database.

    \n
  • \n
  • \n

    \n database.table\n table-name - \n Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

    \n
  • \n
  • \n

    \n database.schema\n schema-name - Export a database schema of the snapshot or cluster. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
  • \n

    \n database.schema.table\n table-name - Export a table of the database schema. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
" } }, "SnapshotTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

The time that the snapshot was created.

" + "smithy.api#documentation": "

The time when the snapshot was created.

" } }, "TaskStartTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

The time that the snapshot or cluster export task started.

" + "smithy.api#documentation": "

The time when the snapshot or cluster export task started.

" } }, "TaskEndTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "

The time that the snapshot or cluster export task ended.

" + "smithy.api#documentation": "

The time when the snapshot or cluster export task ended.

" } }, "S3Bucket": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon S3 bucket that the snapshot or cluster is exported to.

" + "smithy.api#documentation": "

The Amazon S3 bucket where the snapshot or cluster is exported to.

" } }, "S3Prefix": { @@ -17166,7 +17198,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the details of a snapshot or cluster export to Amazon S3.

\n

This data type is used as a response element in the DescribeExportTasks action.

" + "smithy.api#documentation": "

Contains the details of a snapshot or cluster export to Amazon S3.

\n

This data type is used as a response element in the DescribeExportTasks operation.

" } }, "com.amazonaws.rds#ExportTaskAlreadyExistsFault": { @@ -17274,7 +17306,7 @@ "DBClusterIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A DB cluster identifier to force a failover for. This parameter isn't case-sensitive.

\n

Constraints:

\n
    \n
  • \n

    Must match the identifier of an existing DBCluster.

    \n
  • \n
", + "smithy.api#documentation": "

The identifier of the DB cluster to force a failover for. This parameter isn't case-sensitive.

\n

Constraints:

\n
    \n
  • \n

    Must match the identifier of an existing DB cluster.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -17603,13 +17635,13 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the Aurora DB cluster is the primary cluster\n (that is, has read-write capability) for the global\n cluster with which it is associated.

" + "smithy.api#documentation": "

Indicates whether the Aurora DB cluster is the primary cluster\n (that is, has read-write capability) for the global\n cluster with which it is associated.

" } }, "GlobalWriteForwardingStatus": { "target": "com.amazonaws.rds#WriteForwardingStatus", "traits": { - "smithy.api#documentation": "

Specifies whether a secondary cluster in the global cluster has\n write forwarding enabled, not enabled, or is in the process of enabling it.

" + "smithy.api#documentation": "

The status of write forwarding for a secondary cluster in the global cluster.

" } }, "SynchronizationStatus": { @@ -17732,13 +17764,13 @@ "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

" + "smithy.api#documentation": "

The status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

" } }, "CIDRIP": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the IP range.

" + "smithy.api#documentation": "

The IP range.

" } } }, @@ -18701,7 +18733,7 @@ "RemoveCustomerOverride": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to remove the override for the default certificate. \n If the override is removed, the default certificate is the system\n default.

" + "smithy.api#documentation": "

Specifies whether to remove the override for the default certificate. \n If the override is removed, the default certificate is the system\n default.

" } } }, @@ -19976,6 +20008,12 @@ "traits": { "smithy.api#documentation": "

The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB\n to an Oracle Database 21c CDB.

\n

Note the following requirements:

\n
    \n
  • \n

    Make sure that you specify oracle-ee-cdb or oracle-se2-cdb.

    \n
  • \n
  • \n

    Make sure that your DB engine runs Oracle Database 19c with an April 2021 or later RU.

    \n
  • \n
\n

Note the following limitations:

\n
    \n
  • \n

    You can't convert a CDB to a non-CDB.

    \n
  • \n
  • \n

    You can't convert a replica database.

    \n
  • \n
  • \n

    You can't convert a non-CDB to a CDB and upgrade the engine version in the\n same command.

    \n
  • \n
  • \n

    You can't convert the existing custom parameter or option group when it has\n options or parameters that are permanent or persistent. In this situation, the\n DB instance reverts to the default option and parameter group. To avoid\n reverting to the default, specify a new parameter group with\n --db-parameter-group-name and a new option group with\n --option-group-name.

    \n
  • \n
" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -20677,7 +20715,7 @@ "SourceType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

\n

Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy\n

" + "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

\n

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy\n

" } }, "EventCategories": { @@ -20689,7 +20727,7 @@ "Enabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to activate the subscription.

" + "smithy.api#documentation": "

Specifies whether to activate the subscription.

" } } }, @@ -20874,7 +20912,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to apply the change immediately or during the next maintenance window for each instance associated with the option group.

" + "smithy.api#documentation": "

Specifies whether to apply the change immediately or during the next maintenance window for each instance associated with the option group.

" } } }, @@ -20930,14 +20968,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicate if this option is persistent.

" + "smithy.api#documentation": "

Indicates whether this option is persistent.

" } }, "Permanent": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicate if this option is permanent.

" + "smithy.api#documentation": "

Indicates whether this option is permanent.

" } }, "Port": { @@ -20972,7 +21010,7 @@ } }, "traits": { - "smithy.api#documentation": "

Option details.

" + "smithy.api#documentation": "

The details of an option.

" } }, "com.amazonaws.rds#OptionConfiguration": { @@ -21204,7 +21242,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether the option requires a port.

" + "smithy.api#documentation": "

Indicates whether the option requires a port.

" } }, "DefaultPort": { @@ -21274,7 +21312,7 @@ "CopyableCrossAccount": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether the option can be copied across Amazon Web Services accounts.

" + "smithy.api#documentation": "

Indicates whether the option can be copied across Amazon Web Services accounts.

" } } }, @@ -21319,14 +21357,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean value where true indicates that this option group option can be changed from the default value.

" + "smithy.api#documentation": "

Indicates whether this option group option can be changed from the default value.

" } }, "IsRequired": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean value where true indicates that a value must be specified for this option setting of the option group option.

" + "smithy.api#documentation": "

Indicates whether a value must be specified for this option setting of the option group option.

" } }, "MinimumEngineVersionPerAllowedValue": { @@ -21489,14 +21527,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A Boolean value that, when true, indicates the option setting can be modified from the default.

" + "smithy.api#documentation": "

Indicates whether the option setting can be modified from the default.

" } }, "IsCollection": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates if the option setting is part of a collection.

" + "smithy.api#documentation": "

Indicates whether the option setting is part of a collection.

" } } }, @@ -21535,7 +21573,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

True if the version is the default version of the option, and otherwise false.

" + "smithy.api#documentation": "

Indicates whether the version is the default version of the option.

" } } }, @@ -21640,7 +21678,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Indicates the storage type for a DB instance.

" + "smithy.api#documentation": "

The storage type for a DB instance.

" } }, "SupportsIops": { @@ -21668,7 +21706,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

True if a DB instance supports Performance Insights, otherwise false.

" + "smithy.api#documentation": "

Indicates whether a DB instance supports Performance Insights.

" } }, "MinStorageSize": { @@ -21722,20 +21760,20 @@ "SupportsStorageAutoscaling": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.

" + "smithy.api#documentation": "

Indicates whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.

" } }, "SupportsKerberosAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Whether a DB instance supports Kerberos Authentication.

" + "smithy.api#documentation": "

Indicates whether a DB instance supports Kerberos Authentication.

" } }, "OutpostCapable": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether a DB instance supports RDS on Outposts.

\n

For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

Indicates whether a DB instance supports RDS on Outposts.

\n

For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.\n

" } }, "SupportedActivityStreamModes": { @@ -21748,14 +21786,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether you can use Aurora global databases with a specific combination of other DB engine attributes.

" + "smithy.api#documentation": "

Indicates whether you can use Aurora global databases with a specific combination of other DB engine attributes.

" } }, "SupportsClusters": { "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether DB instances can be configured as a Multi-AZ DB cluster.

\n

For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

Indicates whether DB instances can be configured as a Multi-AZ DB cluster.

\n

For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

" } }, "SupportedNetworkTypes": { @@ -21794,6 +21832,13 @@ "traits": { "smithy.api#documentation": "

Maximum storage throughput to provisioned IOPS ratio for a DB instance.

" } + }, + "SupportsDedicatedLogVolume": { + "target": "com.amazonaws.rds#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether a DB instance supports using a dedicated log volume (DLV).

" + } } }, "traits": { @@ -21850,13 +21895,13 @@ "ParameterName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the name of the parameter.

" + "smithy.api#documentation": "

The name of the parameter.

" } }, "ParameterValue": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the value of the parameter.

" + "smithy.api#documentation": "

The value of the parameter.

" } }, "Description": { @@ -21868,7 +21913,7 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Indicates the source of the parameter value.

" + "smithy.api#documentation": "

The source of the parameter value.

" } }, "ApplyType": { @@ -22067,7 +22112,7 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates that the Single-AZ DB instance will change to a Multi-AZ deployment.

" + "smithy.api#documentation": "

Indicates whether the Single-AZ DB instance will change to a Multi-AZ deployment.

" } }, "EngineVersion": { @@ -22124,7 +22169,7 @@ "IAMDatabaseAuthenticationEnabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

" + "smithy.api#documentation": "

Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

" } }, "AutomationMode": { @@ -22150,6 +22195,12 @@ "traits": { "smithy.api#documentation": "

The database engine of the DB instance.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + } } }, "traits": { @@ -22616,7 +22667,7 @@ "ForceFailover": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the reboot is conducted through a Multi-AZ failover.

\n

Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ.

" + "smithy.api#documentation": "

Specifies whether the reboot is conducted through a Multi-AZ failover.

\n

Constraint: You can't enable force failover if the instance isn't configured for Multi-AZ.

" } } }, @@ -23203,7 +23254,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates if the reservation applies to Multi-AZ deployments.

" + "smithy.api#documentation": "

Indicates whether the reservation applies to Multi-AZ deployments.

" } }, "State": { @@ -23374,7 +23425,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates if the offering applies to Multi-AZ deployments.

" + "smithy.api#documentation": "

Indicates whether the offering applies to Multi-AZ deployments.

" } }, "RecurringCharges": { @@ -23482,7 +23533,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to reset all parameters in the DB cluster parameter group \n to their default values. You can't use this parameter if there \n is a list of parameter names specified for the Parameters parameter.

" + "smithy.api#documentation": "

Specifies whether to reset all parameters in the DB cluster parameter group \n to their default values. You can't use this parameter if there \n is a list of parameter names specified for the Parameters parameter.

" } }, "Parameters": { @@ -23544,7 +23595,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to reset all parameters in the DB parameter group to default values. \n By default, all parameters in the DB parameter group are reset to default values.

" + "smithy.api#documentation": "

Specifies whether to reset all parameters in the DB parameter group to default values. \n By default, all parameters in the DB parameter group are reset to default values.

" } }, "Parameters": { @@ -23825,7 +23876,7 @@ "StorageEncrypted": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the restored DB cluster is encrypted.

" + "smithy.api#documentation": "

Specifies whether the restored DB cluster is encrypted.

" } }, "KmsKeyId": { @@ -23837,13 +23888,13 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

" + "smithy.api#documentation": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

" } }, "SourceEngine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The identifier for the database engine that was backed up to create the files stored in the\n Amazon S3 bucket.

\n

Valid values: mysql\n

", + "smithy.api#documentation": "

The identifier for the database engine that was backed up to create the files stored in the\n Amazon S3 bucket.

\n

Valid Values: mysql\n

", "smithy.api#required": {} } }, @@ -23889,13 +23940,13 @@ "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB cluster has deletion protection enabled. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

" + "smithy.api#documentation": "

Specifies whether to enable deletion protection for the DB cluster. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

" } }, "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

" } }, "Domain": { @@ -23916,13 +23967,13 @@ "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

" + "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid Values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

" } }, "ManageMasterUserPassword": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

For more information, see Password management with Amazon Web Services Secrets Manager \n in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager \n in the Amazon Aurora User Guide.\n

\n

Constraints:

\n
    \n
  • \n

    Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword \n is specified.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

For more information, see Password management with Amazon Web Services Secrets Manager \n in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager \n in the Amazon Aurora User Guide.\n

\n

Constraints:

\n
    \n
  • \n

    Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword \n is specified.

    \n
  • \n
" } }, "MasterUserSecretKmsKeyId": { @@ -23934,7 +23985,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

Valid values: aurora, aurora-iopt1\n

\n

Default: aurora\n

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

Valid Values: aurora, aurora-iopt1\n

\n

Default: aurora\n

\n

Valid for: Aurora DB clusters only

" } } }, @@ -24164,7 +24215,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" } }, "BacktrackWindow": { @@ -24200,25 +24251,25 @@ "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB cluster has deletion protection enabled. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether to enable deletion protection for the DB cluster. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "Domain": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specify the Active Directory directory ID to restore the DB cluster in.\n The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

\n

For more information, see \n Kerberos Authentication in the Amazon RDS User Guide.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The Active Directory directory ID to restore the DB cluster in.\n The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

\n

For more information, see \n Kerberos Authentication in the Amazon RDS User Guide.

\n

Valid for: Aurora DB clusters only

" } }, "DomainIAMRoleName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The name of the IAM role to be used when making API calls to the Directory Service.

\n

Valid for: Aurora DB clusters only

" } }, "DBClusterInstanceClass": { @@ -24230,7 +24281,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

\n

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

\n

Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "Iops": { @@ -24242,7 +24293,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. \n Access to the DB cluster is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. \n Access to the DB cluster is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "ServerlessV2ScalingConfiguration": { @@ -24251,7 +24302,7 @@ "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid Values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

\n

Valid for: Aurora DB clusters only

" } } }, @@ -24433,7 +24484,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

A value that indicates whether to restore the DB cluster to the latest \n restorable backup time. By default, the DB cluster isn't restored to the latest \n restorable backup time.

\n

Constraints: Can't be specified if RestoreToTime parameter is provided.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether to restore the DB cluster to the latest \n restorable backup time. By default, the DB cluster isn't restored to the latest \n restorable backup time.

\n

Constraints: Can't be specified if RestoreToTime parameter is provided.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "Port": { @@ -24472,7 +24523,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

\n

For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" } }, "BacktrackWindow": { @@ -24496,25 +24547,25 @@ "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB cluster has deletion protection enabled. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether to enable deletion protection for the DB cluster. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the restored DB cluster to snapshots of the restored DB cluster. The default is not to copy them.

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "Domain": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specify the Active Directory directory ID to restore the DB cluster in.\n The domain must be created prior to this operation.

\n

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster.\n For more information, see Kerberos Authentication\n in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The Active Directory directory ID to restore the DB cluster in.\n The domain must be created prior to this operation.

\n

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster.\n For more information, see Kerberos Authentication\n in the Amazon Aurora User Guide.

\n

Valid for: Aurora DB clusters only

" } }, "DomainIAMRoleName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The name of the IAM role to be used when making API calls to the Directory Service.

\n

Valid for: Aurora DB clusters only

" } }, "ScalingConfiguration": { @@ -24538,13 +24589,13 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

\n

Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

\n

When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

\n

Valid Values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

\n

Valid for: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB cluster's virtual private cloud (VPC). It resolves\n to the public IP address from outside of the DB cluster's VPC. \n Access to the DB cluster is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

Valid for: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether the DB cluster is publicly accessible.

\n

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB cluster's virtual private cloud (VPC). It resolves\n to the public IP address from outside of the DB cluster's VPC. \n Access to the DB cluster is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB cluster doesn't permit it.

\n

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

    \n
  • \n
\n

Valid for: Multi-AZ DB clusters only

" } }, "Iops": { @@ -24559,7 +24610,7 @@ "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The network type of the DB cluster.

\n

Valid Values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

\n

Valid for: Aurora DB clusters only

" } }, "SourceDbClusterResourceId": { @@ -24739,25 +24790,25 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance is a Multi-AZ deployment.

\n

This setting doesn't apply to RDS Custom.

\n

Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

" + "smithy.api#documentation": "

Specifies whether the DB instance is a Multi-AZ deployment.

\n

This setting doesn't apply to RDS Custom.

\n

Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

" } }, "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance is publicly accessible.

\n

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB instance's virtual private cloud (VPC). \n It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled \n by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

\n

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

\n

For more information, see CreateDBInstance.

" + "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

\n

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB instance's virtual private cloud (VPC). \n It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled \n by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

\n

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

\n

For more information, see CreateDBInstance.

" } }, "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether minor version upgrades are applied automatically to the DB instance \n during the maintenance window.

\n

If you restore an RDS Custom DB instance, you must disable this parameter.

" + "smithy.api#documentation": "

Specifies whether to automatically apply minor version upgrades to the DB instance \n during the maintenance window.

\n

If you restore an RDS Custom DB instance, you must disable this parameter.

" } }, "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

License model information for the restored DB instance.

\n

This setting doesn't apply to RDS Custom.

\n

Default: Same as source.

\n

Valid values: license-included | bring-your-own-license | general-public-license\n

" + "smithy.api#documentation": "

License model information for the restored DB instance.

\n

This setting doesn't apply to RDS Custom.

\n

Default: Same as source.

\n

Valid Values: license-included | bring-your-own-license | general-public-license\n

" } }, "DBName": { @@ -24790,7 +24841,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the DB instance.

\n

Valid values: gp2 | gp3 | io1 | standard\n

\n

If you specify io1 or gp3, you must also include a value for the\n Iops parameter.

\n

Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

" + "smithy.api#documentation": "

Specifies the storage type to be associated with the DB instance.

\n

Valid Values: gp2 | gp3 | io1 | standard\n

\n

If you specify io1 or gp3, you must also include a value for the\n Iops parameter.

\n

Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

" } }, "TdeCredentialArn": { @@ -24832,7 +24883,7 @@ "DomainAuthSecretArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

\n

Constraints:

\n

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456\n

" + "smithy.api#documentation": "

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

\n

Constraints:

\n
    \n
  • \n

    Can't be longer than 64 characters.

    \n
  • \n
\n

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456\n

" } }, "DomainDnsIps": { @@ -24844,7 +24895,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

      "CopyTagsToSnapshot": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance.

\n

In most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you \n specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from\n the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance.

\n

For more information, see \n Copying tags to DB instance snapshots in the Amazon RDS User Guide.

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the restored DB instance to snapshots of the DB instance.

\n

In most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you \n specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from\n the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance.

\n

For more information, see \n Copying tags to DB instance snapshots in the Amazon RDS User Guide.

" } }, "DomainIAMRoleName": { @@ -24856,7 +24907,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping is disabled.

\n

For more information about IAM database authentication, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.\n

\n

This setting doesn't apply to RDS Custom.

" + "smithy.api#documentation": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping is disabled.

\n

For more information about IAM database authentication, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.\n

\n

This setting doesn't apply to RDS Custom.

" } }, "EnableCloudwatchLogsExports": { @@ -24874,7 +24925,7 @@ "UseDefaultProcessorFeatures": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance class of the DB instance uses its default\n processor features.

\n

This setting doesn't apply to RDS Custom.

" + "smithy.api#documentation": "

Specifies whether the DB instance class of the DB instance uses its default\n processor features.

\n

This setting doesn't apply to RDS Custom.

" } }, "DBParameterGroupName": { @@ -24886,13 +24937,13 @@ "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance has deletion protection enabled. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled. For more information, see \n \n Deleting a DB Instance.

" + "smithy.api#documentation": "

Specifies whether to enable deletion protection for the DB instance. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled. For more information, see \n \n Deleting a DB Instance.

" } }, "EnableCustomerOwnedIp": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

\n

A CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.

\n

This setting doesn't apply to RDS Custom.

\n

For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.

\n

For more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.

" + "smithy.api#documentation": "

Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

\n

A CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.

\n

This setting doesn't apply to RDS Custom.

\n

For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.

\n

For more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.

" } }, "CustomIamInstanceProfile": { @@ -24910,7 +24961,7 @@ "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB instance.

\n

Valid values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB instance. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

The network type of the DB instance.

\n

Valid Values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB instance. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon RDS User Guide.\n

" } }, "StorageThroughput": { @@ -24930,6 +24981,12 @@ "traits": { "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in\n CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also\n allocate additional storage for future growth.

\n
" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable a dedicated log volume (DLV) for the DB instance.

" + } } }, "traits": { @@ -25121,7 +25178,7 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

      "MultiAZ": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether the DB instance is a Multi-AZ deployment. If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter."
+          "smithy.api#documentation": "Specifies whether the DB instance is a Multi-AZ deployment. If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone parameter."
        }
      },
      "EngineVersion": {
@@ -25133,7 +25190,7 @@
      "AutoMinorVersionUpgrade": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "

A value that indicates whether minor engine upgrades are applied automatically \n to the DB instance during the maintenance window. By default, minor engine upgrades \n are not applied automatically.

" + "smithy.api#documentation": "

Specifies whether to automatically apply minor engine upgrades \n to the DB instance during the maintenance window. By default, minor engine upgrades \n are not applied automatically.

" } }, "LicenseModel": { @@ -25157,7 +25214,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance is publicly accessible.

\n

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB instance's virtual private cloud (VPC). \n It resolves to the public IP address from outside of the DB instance's VPC. \n Access to the DB instance is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

\n

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

\n

For more information, see CreateDBInstance.

" + "smithy.api#documentation": "

Specifies whether the DB instance is publicly accessible.

\n

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address \n from within the DB instance's virtual private cloud (VPC). \n It resolves to the public IP address from outside of the DB instance's VPC. \n Access to the DB instance is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

\n

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

\n

For more information, see CreateDBInstance.

" } }, "Tags": { @@ -25169,13 +25226,13 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

Specifies the storage type to be associated with the DB instance.

\n

Valid values: gp2 | gp3 | io1 | standard\n

\n

If you specify io1 or gp3, \n you must also include a value for the Iops parameter.

\n

Default: io1 \n if the Iops parameter is specified; \n otherwise gp2\n

" + "smithy.api#documentation": "

Specifies the storage type to be associated with the DB instance.

\n

Valid Values: gp2 | gp3 | io1 | standard\n

\n

If you specify io1 or gp3, \n you must also include a value for the Iops parameter.

\n

Default: io1 \n if the Iops parameter is specified; \n otherwise gp2\n

" } }, "StorageEncrypted": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the new DB instance is encrypted or not.

" + "smithy.api#documentation": "

Specifies whether the new DB instance is encrypted or not.

" } }, "KmsKeyId": { @@ -25187,7 +25244,7 @@ "CopyTagsToSnapshot": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

" + "smithy.api#documentation": "

Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

" } }, "MonitoringInterval": { @@ -25205,7 +25262,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.

\n

For more information about IAM database authentication, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

Specifies whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.

\n

For more information about IAM database authentication, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.\n

" } }, "SourceEngine": { @@ -25245,7 +25302,7 @@ "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to enable Performance Insights for the DB instance.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

" + "smithy.api#documentation": "

Specifies whether to enable Performance Insights for the DB instance.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

" } }, "PerformanceInsightsKMSKeyId": { @@ -25275,13 +25332,13 @@ "UseDefaultProcessorFeatures": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance class of the DB instance uses its default\n processor features.

" + "smithy.api#documentation": "

Specifies whether the DB instance class of the DB instance uses its default\n processor features.

" } }, "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether the DB instance has deletion protection enabled. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled. For more information, see \n \n Deleting a DB Instance.

" + "smithy.api#documentation": "

Specifies whether to enable deletion protection for the DB instance. \n The database can't be deleted when deletion protection is enabled. By default, \n deletion protection isn't enabled. For more information, see \n \n Deleting a DB Instance.

" } }, "MaxAllocatedStorage": { @@ -25293,7 +25350,7 @@ "NetworkType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The network type of the DB instance.

\n

Valid values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB instance. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon RDS User Guide.\n

" + "smithy.api#documentation": "

The network type of the DB instance.

\n

Valid Values:

\n
    \n
  • \n

    \n IPV4\n

    \n
  • \n
  • \n

    \n DUAL\n

    \n
  • \n
\n

The network type is determined by the DBSubnetGroup specified for the DB instance. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon RDS User Guide.\n

" } }, "StorageThroughput": { @@ -25305,7 +25362,7 @@ "ManageMasterUserPassword": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

For more information, see Password management with Amazon Web Services Secrets Manager \n in the Amazon RDS User Guide.\n

\n

Constraints:

\n
    \n
  • \n

    Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword \n is specified.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

For more information, see Password management with Amazon Web Services Secrets Manager \n in the Amazon RDS User Guide.\n

\n

Constraints:

\n
    \n
  • \n

    Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword \n is specified.

    \n
  • \n
" } }, "MasterUserSecretKmsKeyId": { @@ -25313,6 +25370,12 @@ "traits": { "smithy.api#documentation": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and \n managed in Amazon Web Services Secrets Manager.

\n

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets \n Manager for the DB instance.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

\n

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager \n KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't \n use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer \n managed KMS key.

\n

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account\n has a different default KMS key for each Amazon Web Services Region.

" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable a dedicated log volume (DLV) for the DB instance.

" + } } }, "traits": { @@ -25694,7 +25757,7 @@ "UseDefaultProcessorFeatures": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

      "UseDefaultProcessorFeatures": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether the DB instance class of the DB instance uses its default processor features.\nThis setting doesn't apply to RDS Custom."
+          "smithy.api#documentation": "Specifies whether the DB instance class of the DB instance uses its default processor features.\nThis setting doesn't apply to RDS Custom."
        }
      },
      "DBParameterGroupName": {
@@ -25762,6 +25825,12 @@
        "traits": {
          "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } + }, + "DedicatedLogVolume": { + "target": "com.amazonaws.rds#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable a dedicated log volume (DLV) for the DB instance.

" + } } }, "traits": { @@ -25956,7 +26025,7 @@ "AutoPause": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode.\n A DB cluster can be paused only when it's idle (it has no connections).

\n \n

If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot.\n In this case, the DB cluster is restored when there is a request to connect to it.

\n
" + "smithy.api#documentation": "

Indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode.\n A DB cluster can be paused only when it's idle (it has no connections).

\n \n

If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot.\n In this case, the DB cluster is restored when there is a request to connect to it.

\n
" } }, "SecondsUntilAutoPause": { @@ -26000,7 +26069,7 @@ "AutoPause": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether automatic pause is allowed for the Aurora DB cluster\n in serverless DB engine mode.

\n

When the value is set to false for an Aurora Serverless v1 DB cluster, the DB cluster automatically resumes.

" + "smithy.api#documentation": "

Indicates whether automatic pause is allowed for the Aurora DB cluster\n in serverless DB engine mode.

\n

When the value is set to false for an Aurora Serverless v1 DB cluster, the DB cluster automatically resumes.

" } }, "SecondsUntilAutoPause": { @@ -26185,7 +26254,7 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether the source Amazon Web Services Region supports replicating automated backups to the current Amazon Web Services Region.

" + "smithy.api#documentation": "

Indicates whether the source Amazon Web Services Region supports replicating automated backups to the current Amazon Web Services Region.

" } } }, @@ -26798,7 +26867,7 @@ "ExportOnly": { "target": "com.amazonaws.rds#StringList", "traits": { - "smithy.api#documentation": "

The data to be exported from the snapshot or cluster. \n If this parameter is not provided, all of the data is exported.\n Valid values are the following:

\n
    \n
  • \n

    \n database - Export all the data from a specified database.

    \n
  • \n
  • \n

    \n database.table\n table-name - \n Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

    \n
  • \n
  • \n

    \n database.schema\n schema-name - Export a database schema of the snapshot or cluster. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
  • \n

    \n database.schema.table\n table-name - Export a table of the database schema. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
" + "smithy.api#documentation": "

The data to be exported from the snapshot or cluster. \n If this parameter isn't provided, all of the data is exported.

\n

Valid Values:

\n
    \n
  • \n

    \n database - Export all the data from a specified database.

    \n
  • \n
  • \n

    \n database.table\n table-name - \n Export a table of the snapshot or cluster. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

    \n
  • \n
  • \n

    \n database.schema\n schema-name - Export a database schema of the snapshot or cluster. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
  • \n

    \n database.schema.table\n table-name - Export a table of the database schema. \n This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

    \n
  • \n
" } } }, @@ -27788,14 +27857,14 @@ "target": "com.amazonaws.rds#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

        "target": "com.amazonaws.rds#Boolean",
        "traits": {
          "smithy.api#default": false,
-          "smithy.api#documentation": "A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true."
+          "smithy.api#documentation": "Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true."
        }
      },
      "IsMajorVersionUpgrade": {
        "target": "com.amazonaws.rds#Boolean",
        "traits": {
          "smithy.api#default": false,
-          "smithy.api#documentation": "A value that indicates whether upgrading to the target version requires upgrading the major version of the database engine."
+          "smithy.api#documentation": "Indicates whether upgrading to the target version requires upgrading the major version of the database engine."
        }
      },
      "SupportedEngineModes": {
@@ -27807,25 +27876,25 @@
      "SupportsParallelQuery": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether you can use Aurora parallel query with the target engine version."
+          "smithy.api#documentation": "Indicates whether you can use Aurora parallel query with the target engine version."
        }
      },
      "SupportsGlobalDatabases": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether you can use Aurora global databases with the target engine version."
+          "smithy.api#documentation": "Indicates whether you can use Aurora global databases with the target engine version."
        }
      },
      "SupportsBabelfish": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version."
+          "smithy.api#documentation": "Indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version."
        }
      },
      "SupportsLocalWriteForwarding": {
        "target": "com.amazonaws.rds#BooleanOptional",
        "traits": {
-          "smithy.api#documentation": "A value that indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.\nValid for: Aurora DB clusters only"
+          "smithy.api#documentation": "Indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.\nValid for: Aurora DB clusters only"
        }
      }
    },
@@ -27863,7 +27932,7 @@

      "IAMAuth": {
        "target": "com.amazonaws.rds#IAMAuthMode",
        "traits": {
-          "smithy.api#documentation": "Whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server."
+          "smithy.api#documentation": "A value that indicates whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server."
        }
      },
      "ClientPasswordAuthType": {
@@ -27947,6 +28016,13 @@
        "traits": {
          "smithy.api#documentation": "Valid processor features for your DB instance."
        }
+      },
+      "SupportsDedicatedLogVolume": {
+        "target": "com.amazonaws.rds#Boolean",
+        "traits": {
+          "smithy.api#default": false,
+          "smithy.api#documentation": "Indicates whether a DB instance supports using a dedicated log volume (DLV)."
+        }
      }
    },
    "traits": {
@@ -27984,7 +28060,7 @@

        "target": "com.amazonaws.rds#Boolean",
        "traits": {
          "smithy.api#default": false,
-          "smithy.api#documentation": "Whether or not Amazon RDS can automatically scale storage for DB instances that use the new instance class."
+          "smithy.api#documentation": "Indicates whether or not Amazon RDS can automatically scale storage for DB instances that use the new instance class."
        }
      },
      "ProvisionedStorageThroughput": {
diff --git a/models/rekognition.json b/models/rekognition.json
index 4693921e0c..3d74e55f6a 100644
--- a/models/rekognition.json
+++ b/models/rekognition.json
@@ -1217,7 +1217,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "

Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and\n destination projects can be in different AWS accounts but must be in the same AWS Region.\n You can't copy a model to another AWS service.\n \n

\n

To copy a model version to a different AWS account, you need to create a resource-based policy known as a\n project policy. You attach the project policy to the\n source project by calling PutProjectPolicy. The project policy\n gives permission to copy the model version from a trusting AWS account to a trusted account.

\n

For more information creating and attaching a project policy, see Attaching a project policy (SDK)\n in the Amazon Rekognition Custom Labels Developer Guide.\n

\n

If you are copying a model version to a project in the same AWS account, you don't need to create a project policy.

\n \n

To copy a model, the destination project, source project, and source model version must already exist.

\n
\n

Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the\n ProjectVersionDescription object. The copy operation has finished when\n the value of Status is COPYING_COMPLETED.

\n

This operation requires permissions to perform the rekognition:CopyProjectVersion action.

", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.

\n
\n

Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and\n destination projects can be in different AWS accounts but must be in the same AWS Region.\n You can't copy a model to another AWS service.\n \n

\n

To copy a model version to a different AWS account, you need to create a resource-based policy known as a\n project policy. You attach the project policy to the\n source project by calling PutProjectPolicy. The project policy\n gives permission to copy the model version from a trusting AWS account to a trusted account.

\n

For more information creating and attaching a project policy, see Attaching a project policy (SDK)\n in the Amazon Rekognition Custom Labels Developer Guide.\n

\n

If you are copying a model version to a project in the same AWS account, you don't need to create a project policy.

\n \n

Copying project versions is supported only for Custom Labels models.

\n

To copy a model, the destination project, source project, and source model version\n must already exist.

\n
\n

Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the\n ProjectVersionDescription object. The copy operation has finished when\n the value of Status is COPYING_COMPLETED.

\n

This operation requires permissions to perform the rekognition:CopyProjectVersion action.

", "smithy.api#examples": [ { "title": "CopyProjectVersion", @@ -1466,7 +1466,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using\n an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset.

\n

To create a training dataset for a project, specify TRAIN for the value of \n DatasetType. To create the test dataset for a project,\n specify TEST for the value of DatasetType.\n

\n

The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset.\n Creating a dataset takes a while to complete. Use DescribeDataset to check the \n current status. The dataset created successfully if the value of Status is\n CREATE_COMPLETE.

\n

To check if any non-terminal errors occurred, call ListDatasetEntries\nand check for the presence of errors lists in the JSON Lines.

\n

Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). \n Currently, you can't access the terminal error information.\n \n

\n

For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide.

\n

This operation requires permissions to perform the rekognition:CreateDataset action.\n If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.

", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.

\n
\n

Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using\n an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset.

\n

To create a training dataset for a project, specify TRAIN for the value of \n DatasetType. To create the test dataset for a project,\n specify TEST for the value of DatasetType.\n

\n

The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset.\n Creating a dataset takes a while to complete. Use DescribeDataset to check the \n current status. The dataset created successfully if the value of Status is\n CREATE_COMPLETE.

\n

To check if any non-terminal errors occurred, call ListDatasetEntries\nand check for the presence of errors lists in the JSON Lines.

\n

Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). \n Currently, you can't access the terminal error information.\n \n

\n

For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide.

\n

This operation requires permissions to perform the rekognition:CreateDataset action.\n If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.

", "smithy.api#examples": [ { "title": "To create an Amazon Rekognition Custom Labels dataset", @@ -1655,7 +1655,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) \n that you use to create and manage Amazon Rekognition Custom Labels models.

\n

This operation requires permissions to perform the rekognition:CreateProject action.

", + "smithy.api#documentation": "

Creates a new Amazon Rekognition project. A project is a group of resources (datasets, model\n versions) that you use to create and manage a Amazon Rekognition Custom Labels Model or custom adapter. You can\n specify a feature to create the project with, if no feature is specified then Custom Labels\n is used by default. For adapters, you can also choose whether or not to have the project\n auto update by using the AutoUpdate argument. This operation requires permissions to\n perform the rekognition:CreateProject action.

", "smithy.api#examples": [ { "title": "To create an Amazon Rekognition Custom Labels project", @@ -1679,6 +1679,18 @@ "smithy.api#documentation": "

The name of the project to create.

", "smithy.api#required": {} } + }, + "Feature": { + "target": "com.amazonaws.rekognition#CustomizationFeature", + "traits": { + "smithy.api#documentation": "

Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default.

" + } + }, + "AutoUpdate": { + "target": "com.amazonaws.rekognition#ProjectAutoUpdate", + "traits": { + "smithy.api#documentation": "

Specifies whether automatic retraining should be attempted for the versions of the\n project. Automatic retraining is done as a best effort. Required argument for Content\n Moderation. Applicable only to adapters.

" + } } }, "traits": { @@ -1737,7 +1749,7 @@ } ], "traits": { - "smithy.api#documentation": "

      } ],
      "traits": {
-        "smithy.api#documentation": "Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model.

\n

Training uses the training and test datasets associated with the project. \n For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide.\n

\n \n

You can train a model in a project that doesn't have associated datasets by specifying manifest files in the\n TrainingData and TestingData fields.\n

\n

If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates\n the datasets for you using the most recent manifest files. You can no longer train\n a model version for the project by specifying manifest files.

\n

Instead of training with a project without associated datasets,\n we recommend that you use the manifest\n files to create training and test datasets for the project.

\n
\n

Training takes a while to complete. You can get the current status by calling\n DescribeProjectVersions. Training completed successfully if\n the value of the Status field is TRAINING_COMPLETED.

\n

If training \n fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide.

\n

Once training has successfully completed, call DescribeProjectVersions to\n get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model\n in the Amazon Rekognition Custom Labels developers guide.\n

\n

After evaluating the model, you start the model\n by calling StartProjectVersion.

\n

This operation requires permissions to perform the rekognition:CreateProjectVersion action.

", + "smithy.api#documentation": "

Creates a new version of Amazon Rekognition project (like a Custom Labels model or a custom adapter)\n and begins training. Models and adapters are managed as part of a Rekognition project. The\n response from CreateProjectVersion is an Amazon Resource Name (ARN) for the\n project version.

\n

The FeatureConfig operation argument allows you to configure specific model or adapter\n settings. You can provide a description to the project version by using the\n VersionDescription argment. Training can take a while to complete. You can get the current\n status by calling DescribeProjectVersions. Training completed\n successfully if the value of the Status field is\n TRAINING_COMPLETED. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the\n model.

\n

This operation requires permissions to perform the\n rekognition:CreateProjectVersion action.

\n \n

\n The following applies only to projects with Amazon Rekognition Custom Labels as the chosen\n feature:\n

\n

You can train a model in a project that doesn't have associated datasets by specifying manifest files in the\n TrainingData and TestingData fields.\n

\n

If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates\n the datasets for you using the most recent manifest files. You can no longer train\n a model version for the project by specifying manifest files.

\n

Instead of training with a project without associated datasets,\n we recommend that you use the manifest\n files to create training and test datasets for the project.

\n
\n

", "smithy.api#examples": [ { "title": "To train an Amazon Rekognition Custom Labels model", @@ -1763,46 +1775,58 @@ "ProjectArn": { "target": "com.amazonaws.rekognition#ProjectArn", "traits": { - "smithy.api#documentation": "

The ARN of the Amazon Rekognition Custom Labels project that \n manages the model that you want to train.

", + "smithy.api#documentation": "

The ARN of the Amazon Rekognition project that will manage the project version you want to\n train.

", "smithy.api#required": {} } }, "VersionName": { "target": "com.amazonaws.rekognition#VersionName", "traits": { - "smithy.api#documentation": "

A name for the version of the model. This value must be unique.

", + "smithy.api#documentation": "

A name for the version of the project version. This value must be unique.

", "smithy.api#required": {} } }, "OutputConfig": { "target": "com.amazonaws.rekognition#OutputConfig", "traits": { - "smithy.api#documentation": "

The Amazon S3 bucket location to store the results of training.\n The S3 bucket can be in any AWS account as long as the caller has\n s3:PutObject permissions on the S3 bucket.

", + "smithy.api#documentation": "

The Amazon S3 bucket location to store the results of training. The bucket can be any S3\n bucket in your AWS account. You need s3:PutObject permission on the bucket.\n

", "smithy.api#required": {} } }, "TrainingData": { "target": "com.amazonaws.rekognition#TrainingData", "traits": { - "smithy.api#documentation": "

Specifies an external manifest that the service uses to train the model. If you specify TrainingData you must also specify TestingData. The project must not have any associated datasets.
" + "smithy.api#documentation": "

Specifies an external manifest that the service uses to train the project version. If you specify TrainingData you must also specify TestingData. The project must not have any associated datasets.
" } }, "TestingData": { "target": "com.amazonaws.rekognition#TestingData", "traits": { - "smithy.api#documentation": "

Specifies an external manifest that the service uses to test the model. If you specify TestingData you must also specify TrainingData. The project must not have any associated datasets.
" + "smithy.api#documentation": "

Specifies an external manifest that the service uses to test the project version. If you specify TestingData you must also specify TrainingData. The project must not have any associated datasets.
" } }, "Tags": { "target": "com.amazonaws.rekognition#TagMap", "traits": { - "smithy.api#documentation": "

A set of tags (key-value pairs) that you want to attach to the model.
" + "smithy.api#documentation": "

A set of tags (key-value pairs) that you want to attach to the project version.
" } }, "KmsKeyId": { "target": "com.amazonaws.rekognition#KmsKeyId", "traits": { - "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key is used to encrypt training and test images copied into the service for model training. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig).
If you choose to use your own KMS key, you need the following permissions on the KMS key.
• kms:CreateGrant
• kms:DescribeKey
• kms:GenerateDataKey
• kms:Decrypt
If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages.
" + "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key is used to encrypt training images, test images, and manifest files copied into the service for the project version. Your source images are unaffected. The key is also used to encrypt training results and manifest files written to the output Amazon S3 bucket (OutputConfig).
If you choose to use your own KMS key, you need the following permissions on the KMS key.
• kms:CreateGrant
• kms:DescribeKey
• kms:GenerateDataKey
• kms:Decrypt
If you don't specify a value for KmsKeyId, images copied into the service are encrypted using a key that AWS owns and manages.
" + } + }, + "VersionDescription": { + "target": "com.amazonaws.rekognition#VersionDescription", + "traits": { + "smithy.api#documentation": "

A description applied to the project version being created.
" + } + }, + "FeatureConfig": { + "target": "com.amazonaws.rekognition#CustomizationFeatureConfig", + "traits": { + "smithy.api#documentation": "

Feature-specific configuration of the training job. If the job configuration does not match the feature type associated with the project, an InvalidParameterException is returned.
" } } }, @@ -1816,7 +1840,7 @@ "ProjectVersionArn": { "target": "com.amazonaws.rekognition#ProjectVersionArn", "traits": { - "smithy.api#documentation": "

The ARN of the model version that was created. Use DescribeProjectVersions to get the current status of the training operation.
" + "smithy.api#documentation": "

The ARN of the model or the project version that was created. Use DescribeProjectVersions to get the current status of the training operation.
" } } }, @@ -2065,6 +2089,63 @@ "target": "com.amazonaws.rekognition#CustomLabel" } }, + "com.amazonaws.rekognition#CustomizationFeature": { + "type": "enum", + "members": { + "CONTENT_MODERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTENT_MODERATION" + } + }, + "CUSTOM_LABELS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM_LABELS" + } + } + } + }, + "com.amazonaws.rekognition#CustomizationFeatureConfig": { + "type": "structure", + "members": { + "ContentModeration": { + "target": "com.amazonaws.rekognition#CustomizationFeatureContentModerationConfig", + "traits": { + "smithy.api#documentation": "

Configuration options for Custom Moderation training.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Feature specific configuration for the training job. Configuration provided for the job must match the feature type parameter associated with the project. If the configuration and feature type do not match, an InvalidParameterException is returned.
" + } + }, + "com.amazonaws.rekognition#CustomizationFeatureContentModerationConfig": { + "type": "structure", + "members": { + "ConfidenceThreshold": { + "target": "com.amazonaws.rekognition#Percent", + "traits": { + "smithy.api#documentation": "

The confidence level you plan to use to identify if unsafe content is present during inference.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration options for Content Moderation training.
" + } + }, + "com.amazonaws.rekognition#CustomizationFeatures": { + "type": "list", + "member": { + "target": "com.amazonaws.rekognition#CustomizationFeature" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, "com.amazonaws.rekognition#DatasetArn": { "type": "string", "traits": { @@ -2524,7 +2605,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception.
You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS).
This operation requires permissions to perform the rekognition:DeleteDataset action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception.
You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS).
This operation requires permissions to perform the rekognition:DeleteDataset action.
", "smithy.api#examples": [ { "title": "To delete an Amazon Rekognition Custom Labels dataset", @@ -2682,7 +2763,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated with the project. To delete a model, see DeleteProjectVersion.
DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project.
This operation requires permissions to perform the rekognition:DeleteProject action.
", + "smithy.api#documentation": "

Deletes an Amazon Rekognition project. To delete a project you must first delete all models or adapters associated with the project. To delete a model or adapter, see DeleteProjectVersion.
DeleteProject is an asynchronous operation. To check if the project is deleted, call DescribeProjects. The project is deleted when the project no longer appears in the response. Be aware that deleting a given project will also delete any ProjectPolicies associated with that project.
This operation requires permissions to perform the rekognition:DeleteProject action.
", "smithy.api#examples": [ { "title": "To delete an Amazon Rekognition Custom Labels project", @@ -2729,7 +2810,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing project policy.
To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy.
This operation requires permissions to perform the rekognition:DeleteProjectPolicy action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Deletes an existing project policy.
To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy.
This operation requires permissions to perform the rekognition:DeleteProjectPolicy action.
", "smithy.api#examples": [ { "title": "DeleteProjectPolicy", @@ -2840,7 +2921,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels model.
You can't delete a model if it is running or if it is training. To check the status of a model, use the Status field returned from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model is training, wait until it finishes.
This operation requires permissions to perform the rekognition:DeleteProjectVersion action.
", + "smithy.api#documentation": "

Deletes a Rekognition project model or project version, like an Amazon Rekognition Custom Labels model or a custom adapter.
You can't delete a project version if it is running or if it is training. To check the status of a project version, use the Status field returned from DescribeProjectVersions. To stop a project version call StopProjectVersion. If the project version is training, wait until it finishes.
This operation requires permissions to perform the rekognition:DeleteProjectVersion action.
", "smithy.api#examples": [ { "title": "To delete an Amazon Rekognition Custom Labels model", @@ -2861,7 +2942,7 @@ "ProjectVersionArn": { "target": "com.amazonaws.rekognition#ProjectVersionArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model version that you want to delete.
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the project version that you want to delete.
", "smithy.api#required": {} } } @@ -3140,7 +3221,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes an Amazon Rekognition Custom Labels dataset. You can get information such as the current status of a dataset and statistics about the images and labels in a dataset.
This operation requires permissions to perform the rekognition:DescribeDataset action.
" + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Describes an Amazon Rekognition Custom Labels dataset. You can get information such as the current status of a dataset and statistics about the images and labels in a dataset.
This operation requires permissions to perform the rekognition:DescribeDataset action.
" } }, "com.amazonaws.rekognition#DescribeDatasetRequest": { @@ -3204,7 +3285,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists and describes the versions of a model in an Amazon Rekognition Custom Labels project. You can specify up to 10 model versions in ProjectVersionArns. If you don't specify a value, descriptions for all model versions in the project are returned.
This operation requires permissions to perform the rekognition:DescribeProjectVersions action.
", + "smithy.api#documentation": "

Lists and describes the versions of an Amazon Rekognition project. You can specify up to 10 model or adapter versions in ProjectVersionArns. If you don't specify a value, descriptions for all model/adapter versions in the project are returned.
This operation requires permissions to perform the rekognition:DescribeProjectVersions action.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3273,20 +3354,20 @@ "ProjectArn": { "target": "com.amazonaws.rekognition#ProjectArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the project that contains the models you want to describe.
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the project that contains the model/adapter you want to describe.
", "smithy.api#required": {} } }, "VersionNames": { "target": "com.amazonaws.rekognition#VersionNames", "traits": { - "smithy.api#documentation": "

A list of model version names that you want to describe. You can add up to 10 model version names to the list. If you don't specify a value, all model descriptions are returned. A version name is part of a model (ProjectVersion) ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123.
" + "smithy.api#documentation": "

A list of model or project version names that you want to describe. You can add up to 10 model or project version names to the list. If you don't specify a value, all project version descriptions are returned. A version name is part of a project version ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123.
" } }, "NextToken": { "target": "com.amazonaws.rekognition#ExtendedPaginationToken", "traits": { - "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" + "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" } }, "MaxResults": { @@ -3306,13 +3387,13 @@ "ProjectVersionDescriptions": { "target": "com.amazonaws.rekognition#ProjectVersionDescriptions", "traits": { - "smithy.api#documentation": "

A list of model descriptions. The list is sorted by the creation date and time of the model versions, latest to earliest.
" + "smithy.api#documentation": "

A list of project version descriptions. The list is sorted by the creation date and time of the project versions, latest to earliest.
" } }, "NextToken": { "target": "com.amazonaws.rekognition#ExtendedPaginationToken", "traits": { - "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" + "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" } } }, @@ -3349,7 +3430,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about your Amazon Rekognition Custom Labels projects.
This operation requires permissions to perform the rekognition:DescribeProjects action.
", + "smithy.api#documentation": "

Gets information about your Rekognition projects.
This operation requires permissions to perform the rekognition:DescribeProjects action.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3364,7 +3445,7 @@ "NextToken": { "target": "com.amazonaws.rekognition#ExtendedPaginationToken", "traits": { - "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" + "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" } }, "MaxResults": { @@ -3376,7 +3457,13 @@ "ProjectNames": { "target": "com.amazonaws.rekognition#ProjectNames", "traits": { - "smithy.api#documentation": "

A list of the projects that you want Amazon Rekognition Custom Labels to describe. If you don't specify a value, the response includes descriptions for all the projects in your AWS account.
" + "smithy.api#documentation": "

A list of the projects that you want Rekognition to describe. If you don't specify a value, the response includes descriptions for all the projects in your AWS account.
" + } + }, + "Features": { + "target": "com.amazonaws.rekognition#CustomizationFeatures", + "traits": { + "smithy.api#documentation": "

Specifies the type of customization to filter projects by. If no value is specified, CUSTOM_LABELS is used as a default.
" } } }, @@ -3396,7 +3483,7 @@ "NextToken": { "target": "com.amazonaws.rekognition#ExtendedPaginationToken", "traits": { - "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" + "smithy.api#documentation": "

If the previous response was incomplete (because there are more results to retrieve), Amazon Rekognition returns a pagination token in the response. You can use this pagination token to retrieve the next set of results.
" } } }, @@ -3584,7 +3671,7 @@ } ], "traits": { - "smithy.api#documentation": "

Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.
You specify which version of a model to use by using the ProjectVersionArn input parameter.
You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry).
To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics express an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall of your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.
If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.
This is a stateless API operation. That is, the operation does not persist any data.
This operation requires permissions to perform the rekognition:DetectCustomLabels action.
For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.
You specify which version of a model to use by using the ProjectVersionArn input parameter.
You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry).
To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics express an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall of your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.
If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.
This is a stateless API operation. That is, the operation does not persist any data.
This operation requires permissions to perform the rekognition:DetectCustomLabels action.
For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.
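To ground the parameters described above, here is a minimal Soto sketch of analyzing a single S3 image with a running model version; the client is assumed to be set up as in the earlier sketch, and the bucket, key, and ARN are placeholders.

```swift
// Sketch: detect custom labels in one S3 image with a running model version.
let result = try await rekognition.detectCustomLabels(
    .init(
        image: .init(s3Object: .init(bucket: "my-images", name: "photos/item.jpg")),
        maxResults: 10,
        minConfidence: 70, // percent; omit to use each label's assumed threshold
        projectVersionArn: "arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/my-model.2020-01-21T09.10.15/1234567890123"
    )
)
for label in result.customLabels ?? [] {
    print(label.name ?? "unknown", label.confidence ?? 0)
}
```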
", "smithy.api#examples": [ { "title": "To detect custom labels in an image with an Amazon Rekognition Custom Labels model", @@ -3622,7 +3709,7 @@ "ProjectVersionArn": { "target": "com.amazonaws.rekognition#ProjectVersionArn", "traits": { - "smithy.api#documentation": "

The ARN of the model version that you want to use.
", + "smithy.api#documentation": "

The ARN of the model version that you want to use. Only models associated with Custom Labels projects are accepted by the operation. If a provided ARN refers to a model version associated with a project for a different feature type, then an InvalidParameterException is returned.
", "smithy.api#required": {} } }, @@ -4148,12 +4235,18 @@ { "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" }, + { + "target": "com.amazonaws.rekognition#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotReadyException" + }, { "target": "com.amazonaws.rekognition#ThrottlingException" } ], "traits": { - "smithy.api#documentation": "

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.
To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.
For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.
You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
" + "smithy.api#documentation": "

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.
To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.
For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.
You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.
You can specify an adapter to use when retrieving label predictions by providing a ProjectVersionArn to the ProjectVersion argument.
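As an illustration of the new ProjectVersion argument, the hypothetical Soto call below moderates an S3 image through a custom adapter; omit projectVersion to use only the base moderation model. The bucket, key, and adapter ARN are placeholders, and the client is assumed from the earlier sketch.

```swift
// Sketch: moderate an S3 image, routing predictions through a custom adapter.
let moderation = try await rekognition.detectModerationLabels(
    .init(
        image: .init(s3Object: .init(bucket: "my-images", name: "uploads/photo.png")),
        minConfidence: 50,
        projectVersion: "arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/my-adapter.2023-09-01T00.00.00/1234567890123"
    )
)
for label in moderation.moderationLabels ?? [] {
    print(label.name ?? "unknown", label.confidence ?? 0)
}
print("Adapter used:", moderation.projectVersion ?? "none (base model only)")
```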
" } }, "com.amazonaws.rekognition#DetectModerationLabelsRequest": { @@ -4177,6 +4270,12 @@ "traits": { "smithy.api#documentation": "

Sets up the configuration for human evaluation, including the FlowDefinition the image will be sent to.
" } + }, + "ProjectVersion": { + "target": "com.amazonaws.rekognition#ProjectVersionId", + "traits": { + "smithy.api#documentation": "

Identifier for the custom adapter. Expects the ProjectVersionArn as a value. Use the CreateProject or CreateProjectVersion APIs to create a custom adapter.
" + } } }, "traits": { @@ -4195,7 +4294,7 @@ "ModerationModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

Version number of the moderation detection model that was used to detect unsafe content.
" + "smithy.api#documentation": "

Version number of the base moderation detection model that was used to detect unsafe content.
" } }, "HumanLoopActivationOutput": { @@ -4203,6 +4302,12 @@ "traits": { "smithy.api#documentation": "

Shows the results of the human in the loop evaluation.
" } + }, + "ProjectVersion": { + "target": "com.amazonaws.rekognition#ProjectVersionId", + "traits": { + "smithy.api#documentation": "

Identifier of the custom adapter that was used during inference. If during inference the adapter was EXPIRED, then the parameter will not be returned, indicating that a base moderation detection project version was used.
" + } } }, "traits": { @@ -4621,7 +4726,7 @@ } ], "traits": { - "smithy.api#documentation": "

Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image.
You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset.
Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED.
This operation requires permissions to perform the rekognition:DistributeDatasetEntries action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image.
You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset.
Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED.
This operation requires permissions to perform the rekognition:DistributeDatasetEntries action.
", "smithy.api#examples": [ { "title": "To distribute an Amazon Rekognition Custom Labels dataset", @@ -7954,7 +8059,7 @@ } }, "traits": { - "smithy.api#documentation": "

An Amazon Rekognition service limit was exceeded. For example, if you start too many Amazon Rekognition Video jobs concurrently, calls to start operations (StartLabelDetection, for example) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.
", + "smithy.api#documentation": "

An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs concurrently, subsequent calls to start operations (ex: StartLabelDetection) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.
", "smithy.api#error": "client" } }, @@ -8094,7 +8199,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the entries (images) within a dataset. An entry is a JSON Line that contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Creating a manifest file.
JSON Lines in the response include information about non-terminal errors found in the dataset. Non-terminal errors are reported in errors lists within each JSON Line. The same information is reported in the training and testing validation result manifests that Amazon Rekognition Custom Labels creates during model training.
You can filter the response in a variety of ways, such as choosing which labels to return and returning JSON Lines created after a specific date.
This operation requires permissions to perform the rekognition:ListDatasetEntries action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Lists the entries (images) within a dataset. An entry is a JSON Line that contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Creating a manifest file.
JSON Lines in the response include information about non-terminal errors found in the dataset. Non-terminal errors are reported in errors lists within each JSON Line. The same information is reported in the training and testing validation result manifests that Amazon Rekognition Custom Labels creates during model training.
You can filter the response in a variety of ways, such as choosing which labels to return and returning JSON Lines created after a specific date.
This operation requires permissions to perform the rekognition:ListDatasetEntries action.
", "smithy.api#examples": [ { "title": "To list the entries in an Amazon Rekognition Custom Labels dataset", @@ -8244,7 +8349,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images.
Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images.
Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images in the Amazon Rekognition Custom Labels Developer Guide.
", "smithy.api#examples": [ { "title": "To list the entries in an Amazon Rekognition Custom Labels dataset", @@ -8532,7 +8637,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a list of the project policies attached to a project.
To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy.
This operation requires permissions to perform the rekognition:ListProjectPolicies action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Gets a list of the project policies attached to a project.
To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy.
This operation requires permissions to perform the rekognition:ListProjectPolicies action.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -9397,6 +9502,23 @@ "smithy.api#pattern": "^(^arn:[a-z\\d-]+:rekognition:[a-z\\d-]+:\\d{12}:project\\/[a-zA-Z0-9_.\\-]{1,255}\\/[0-9]+$)$" } }, + "com.amazonaws.rekognition#ProjectAutoUpdate": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.rekognition#ProjectDescription": { "type": "structure", "members": { @@ -9423,6 +9545,18 @@ "traits": { "smithy.api#documentation": "

Information about the training and test datasets in the project.
" } + }, + "Feature": { + "target": "com.amazonaws.rekognition#CustomizationFeature", + "traits": { + "smithy.api#documentation": "

Specifies the project that is being customized.
" + } + }, + "AutoUpdate": { + "target": "com.amazonaws.rekognition#ProjectAutoUpdate", + "traits": { + "smithy.api#documentation": "

Indicates whether automatic retraining will be attempted for the versions of the project. Applies only to adapters.
" + } } }, "traits": { @@ -9576,7 +9710,7 @@ "ProjectVersionArn": { "target": "com.amazonaws.rekognition#ProjectVersionArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model version.
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the project version.
" } }, "CreationTimestamp": { @@ -9588,7 +9722,7 @@ "MinInferenceUnits": { "target": "com.amazonaws.rekognition#InferenceUnits", "traits": { - "smithy.api#documentation": "

The minimum number of inference units used by the model. For more information, see StartProjectVersion.
" + "smithy.api#documentation": "

The minimum number of inference units used by the model. Applies only to Custom Labels projects. For more information, see StartProjectVersion.
" } }, "Status": { @@ -9654,7 +9788,7 @@ "MaxInferenceUnits": { "target": "com.amazonaws.rekognition#InferenceUnits", "traits": { - "smithy.api#documentation": "

The maximum number of inference units Amazon Rekognition Custom Labels uses to auto-scale the model. For more information, see StartProjectVersion.
" + "smithy.api#documentation": "

The maximum number of inference units Amazon Rekognition uses to auto-scale the model. Applies only to Custom Labels projects. For more information, see StartProjectVersion.
" } }, "SourceProjectVersionArn": { @@ -9662,10 +9796,34 @@ "traits": { "smithy.api#documentation": "

If the model version was copied from a different project, SourceProjectVersionArn contains the ARN of the source model version.
" } + }, + "VersionDescription": { + "target": "com.amazonaws.rekognition#VersionDescription", + "traits": { + "smithy.api#documentation": "

A user-provided description of the project version.
" + } + }, + "Feature": { + "target": "com.amazonaws.rekognition#CustomizationFeature", + "traits": { + "smithy.api#documentation": "

The feature that was customized.
" + } + }, + "BaseModelVersion": { + "target": "com.amazonaws.rekognition#String", + "traits": { + "smithy.api#documentation": "

The base detection model version used to create the project version.
" + } + }, + "FeatureConfig": { + "target": "com.amazonaws.rekognition#CustomizationFeatureConfig", + "traits": { + "smithy.api#documentation": "

Feature specific configuration that was applied during training.
" + } } }, "traits": { - "smithy.api#documentation": "

A description of a version of an Amazon Rekognition Custom Labels model.
" + "smithy.api#documentation": "

A description of a version of an Amazon Rekognition project version.
" } }, "com.amazonaws.rekognition#ProjectVersionDescriptions": { @@ -9674,6 +9832,16 @@ "target": "com.amazonaws.rekognition#ProjectVersionDescription" } }, + "com.amazonaws.rekognition#ProjectVersionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^(^arn:[a-z\\d-]+:rekognition:[a-z\\d-]+:\\d{12}:project\\/[a-zA-Z0-9_.\\-]{1,255}\\/version\\/[a-zA-Z0-9_.\\-]{1,255}\\/[0-9]+$)$" + } + }, "com.amazonaws.rekognition#ProjectVersionStatus": { "type": "enum", "members": { @@ -9748,6 +9916,18 @@ "traits": { "smithy.api#enumValue": "COPYING_FAILED" } + }, + "DEPRECATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEPRECATED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } } } }, @@ -9981,7 +10161,7 @@ } ], "traits": { - "smithy.api#documentation": "

Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation.
For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide.
The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy.
To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies.
You copy a model version by calling CopyProjectVersion.
This operation requires permissions to perform the rekognition:PutProjectPolicy action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation. Only applies to Custom Labels projects.
For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide.
The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy.
To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies.
You copy a model version by calling CopyProjectVersion.
This operation requires permissions to perform the rekognition:PutProjectPolicy action.
", "smithy.api#examples": [ { "title": "PutProjectPolicy", @@ -13054,7 +13234,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions.
Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels.
You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion.
For more information, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide.
This operation requires permissions to perform the rekognition:StartProjectVersion action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions.
Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels.
You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion.
This operation requires permissions to perform the rekognition:StartProjectVersion action.
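A minimal Soto sketch of starting a trained Custom Labels model version, assuming the client from the earlier sketch; the version ARN and inference-unit counts are placeholders.

```swift
// Sketch: start a trained model version and read back its initial status.
let versionArn = "arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/my-model.2020-01-21T09.10.15/1234567890123"
let started = try await rekognition.startProjectVersion(
    .init(maxInferenceUnits: 2, minInferenceUnits: 1, projectVersionArn: versionArn)
)
// Poll DescribeProjectVersions until the status is RUNNING; remember that a
// running model is billed, so call StopProjectVersion when you are done.
print(started.status as Any)
```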
", "smithy.api#examples": [ { "title": "To start an Amazon Rekognition Custom Labels model", @@ -13084,7 +13264,7 @@ "MinInferenceUnits": { "target": "com.amazonaws.rekognition#InferenceUnits", "traits": { - "smithy.api#documentation": "

The minimum number of inference units to use. A single inference unit represents 1 hour of processing.
For information about the number of transactions per second (TPS) that an inference unit can support, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide.
Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.
", + "smithy.api#documentation": "

The minimum number of inference units to use. A single inference unit represents 1 hour of processing.
Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.
", "smithy.api#required": {} } }, @@ -13493,7 +13673,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions.
This operation requires permissions to perform the rekognition:StopProjectVersion action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. Only applies to Custom Labels projects.
This operation requires permissions to perform the rekognition:StopProjectVersion action.
", "smithy.api#examples": [ { "title": "To stop an Amazon Rekognition Custom Labels model.", @@ -13514,7 +13694,7 @@ "ProjectVersionArn": { "target": "com.amazonaws.rekognition#ProjectVersionArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model version that you want to delete.
This operation requires permissions to perform the rekognition:StopProjectVersion action.
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model version that you want to stop.
This operation requires permissions to perform the rekognition:StopProjectVersion action.
", "smithy.api#required": {} } } @@ -14060,12 +14240,12 @@ "target": "com.amazonaws.rekognition#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

If specified, Amazon Rekognition Custom Labels temporarily splits the training dataset (80%) to create a test dataset (20%) for the training job. After training completes, the test dataset is not stored and the training dataset reverts to its previous size.
" + "smithy.api#documentation": "

If specified, Rekognition splits the training dataset to create a test dataset for the training job.
" } } }, "traits": { - "smithy.api#documentation": "

The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition Custom Labels uses the training dataset to create a test dataset with a temporary split of the training dataset.
" + "smithy.api#documentation": "

The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition uses the training dataset to create a test dataset with a temporary split of the training dataset.
" } }, "com.amazonaws.rekognition#TestingDataResult": { @@ -14091,7 +14271,7 @@ } }, "traits": { - "smithy.api#documentation": "

Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.
" + "smithy.api#documentation": "

Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.
" } }, "com.amazonaws.rekognition#TextDetection": { @@ -14224,7 +14404,7 @@ "Assets": { "target": "com.amazonaws.rekognition#Assets", "traits": { - "smithy.api#documentation": "

A Sagemaker GroundTruth manifest file that contains the training images (assets).
" + "smithy.api#documentation": "

A manifest file that contains references to the training images and ground-truth annotations.
" } } }, @@ -14238,24 +14418,24 @@ "Input": { "target": "com.amazonaws.rekognition#TrainingData", "traits": { - "smithy.api#documentation": "

The training assets that you supplied for training.
" + "smithy.api#documentation": "

The training data that you supplied.
" } }, "Output": { "target": "com.amazonaws.rekognition#TrainingData", "traits": { - "smithy.api#documentation": "

The images (assets) that were actually trained by Amazon Rekognition Custom Labels.
" + "smithy.api#documentation": "

Reference to images (assets) that were actually used during training with trained model predictions.
" } }, "Validation": { "target": "com.amazonaws.rekognition#ValidationData", "traits": { - "smithy.api#documentation": "

The location of the data validation manifest. The data validation manifest is created for the training dataset during model training.
" + "smithy.api#documentation": "

A manifest that you supplied for training, with validation results for each line.
" } } }, "traits": { - "smithy.api#documentation": "

Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.
" + "smithy.api#documentation": "

The data validation manifest created for the training dataset during model training.
" } }, "com.amazonaws.rekognition#UInteger": { @@ -14673,7 +14853,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide.
If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If the source-ref field doesn't reference an existing image, the image is added as a new image to the dataset.
You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number of JSON Lines that you can change, but the size of Changes must be less than 5MB.
UpdateDatasetEntries returns immediately, but the dataset update might take a while to complete. Use DescribeDataset to check the current status. The dataset updated successfully if the value of Status is UPDATE_COMPLETE.
To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines.
Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK.
This operation requires permissions to perform the rekognition:UpdateDatasetEntries action.
", + "smithy.api#documentation": "\n

This operation applies only to Amazon Rekognition Custom Labels.
Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide.
If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If the source-ref field doesn't reference an existing image, the image is added as a new image to the dataset.
You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number of JSON Lines that you can change, but the size of Changes must be less than 5MB.
UpdateDatasetEntries returns immediately, but the dataset update might take a while to complete. Use DescribeDataset to check the current status. The dataset updated successfully if the value of Status is UPDATE_COMPLETE.
To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines.
Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK.
This operation requires permissions to perform the rekognition:UpdateDatasetEntries action.
", "smithy.api#examples": [ { "title": "To-add dataset entries to an Amazon Rekognition Custom Labels dataset", @@ -14943,6 +15123,16 @@ "smithy.api#documentation": "

Contains the Amazon S3 bucket location of the validation data for a model training job.
The validation data includes error information for individual JSON Lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.
You get the ValidationData object for the training dataset (TrainingDataResult) and the test dataset (TestingDataResult) by calling DescribeProjectVersions.
The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.
" } }, + "com.amazonaws.rekognition#VersionDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_. ()':,;?]+$" + } + }, "com.amazonaws.rekognition#VersionName": { "type": "string", "traits": { diff --git a/models/route-53.json b/models/route-53.json index cb171eb7c4..d5fbce4bb3 100644 --- a/models/route-53.json +++ b/models/route-53.json @@ -2010,7 +2010,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
Deleting Resource Record Sets
To delete a resource record set, you must specify all the same values that you specified when you created it.
Change Batches and Transactional Changes
The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.
If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.
Traffic Flow
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.
Create, Delete, and Upsert
Use ChangeResourceRecordsSetsRequest to perform the following actions:
• CREATE: Creates a resource record set that has the specified values.
• DELETE: Deletes an existing resource record set that has the specified values.
• UPSERT: If a resource set exists Route 53 updates it with the values in the request.
Syntaxes for Creating, Updating, and Deleting Resource Record Sets
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see \"Examples.\"
Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.
Change Propagation to Route 53 DNS Servers
When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers managing the hosted zone. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers managing the hosted zone within 60 seconds. For more information, see GetChange.
Limits on ChangeResourceRecordSets Requests
For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.
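For comparison with the CREATE/DELETE/UPSERT wording above, here is a minimal SotoRoute53 sketch that upserts an A record for test.example.com; it assumes an AWSClient named client as in the earlier sketches, and the hosted zone ID is a placeholder.

```swift
import SotoRoute53

// Sketch: UPSERT an A record so test.example.com resolves to 192.0.2.44.
let route53 = Route53(client: client)
let changeBatch = Route53.ChangeBatch(
    changes: [
        .init(
            action: .upsert,
            resourceRecordSet: .init(
                name: "test.example.com",
                resourceRecords: [.init(value: "192.0.2.44")],
                ttl: 300,
                type: .a
            )
        )
    ],
    comment: "Point test.example.com at the web server"
)
let submitted = try await route53.changeResourceRecordSets(
    .init(changeBatch: changeBatch, hostedZoneId: "Z3M3LMPEXAMPLE")
)
print(submitted.changeInfo.status) // PENDING until propagation completes, then INSYNC
```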
", + "smithy.api#documentation": "

Creates, changes, or deletes a resource record set, which contains authoritative DNS\n\t\t\tinformation for a specified domain name or subdomain name. For example, you can use\n\t\t\t\tChangeResourceRecordSets to create a resource record set that routes\n\t\t\ttraffic for test.example.com to a web server that has an IP address of\n\t\t\t192.0.2.44.

\n

\n Deleting Resource Record Sets\n

\n

To delete a resource record set, you must specify all the same values that you\n\t\t\tspecified when you created it.

\n

\n Change Batches and Transactional Changes\n

\n

The request body must include a document with a\n\t\t\t\tChangeResourceRecordSetsRequest element. The request body contains a\n\t\t\tlist of change items, known as a change batch. Change batches are considered\n\t\t\ttransactional changes. Route 53 validates the changes in the request and then either\n\t\t\tmakes all or none of the changes in the change batch request. This ensures that DNS\n\t\t\trouting isn't adversely affected by partial changes to the resource record sets in a\n\t\t\thosted zone.

\n

For example, suppose a change batch request contains two changes: it deletes the\n\t\t\t\tCNAME resource record set for www.example.com and creates an alias\n\t\t\tresource record set for www.example.com. If validation for both records succeeds, Route\n\t\t\t53 deletes the first resource record set and creates the second resource record set in a\n\t\t\tsingle operation. If validation for either the DELETE or the\n\t\t\t\tCREATE action fails, then the request is canceled, and the original\n\t\t\t\tCNAME record continues to exist.

\n \n

If you try to delete the same resource record set more than once in a single\n\t\t\t\tchange batch, Route 53 returns an InvalidChangeBatch error.

\n
\n

\n Traffic Flow\n

\n

To create resource record sets for complex routing configurations, use either the\n\t\t\ttraffic flow visual editor in the Route 53 console or the API actions for traffic\n\t\t\tpolicies and traffic policy instances. Save the configuration as a traffic policy, then\n\t\t\tassociate the traffic policy with one or more domain names (such as example.com) or\n\t\t\tsubdomain names (such as www.example.com), in the same hosted zone or in multiple hosted\n\t\t\tzones. You can roll back the updates if the new configuration isn't performing as\n\t\t\texpected. For more information, see Using Traffic Flow to Route\n\t\t\t\tDNS Traffic in the Amazon Route 53 Developer\n\t\t\tGuide.

\n

\n Create, Delete, and Upsert\n

\n

Use ChangeResourceRecordSetsRequest to perform the following\n\t\t\tactions:

\n
    \n
  • \n

    \n CREATE: Creates a resource record set that has the specified\n\t\t\t\t\tvalues.

    \n
  • \n
  • \n

    \n DELETE: Deletes an existing resource record set that has the\n\t\t\t\t\tspecified values.

    \n
  • \n
  • \n

    \n UPSERT: If a resource record set doesn't exist, Route 53 creates it. If a resource record\n\t\t\t\t\tset exists, Route 53 updates it with the values in the request.

    \n
  • \n
\n

\n Syntaxes for Creating, Updating, and Deleting Resource Record\n\t\t\t\tSets\n

\n

The syntax for a request depends on the type of resource record set that you want to\n\t\t\tcreate, delete, or update, such as weighted, alias, or failover. The XML elements in\n\t\t\tyour request must appear in the order listed in the syntax.

\n

For an example for each type of resource record set, see \"Examples.\"

\n

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes\n\t\t\tall of the elements for every kind of resource record set that you can create, delete,\n\t\t\tor update by using ChangeResourceRecordSets.

\n

\n Change Propagation to Route 53 DNS Servers\n

\n

When you submit a ChangeResourceRecordSets request, Route 53 propagates your\n\t\t\tchanges to all of the Route 53 authoritative DNS servers managing the hosted zone. While\n\t\t\tyour changes are propagating, GetChange returns a status of\n\t\t\t\tPENDING. When propagation is complete, GetChange returns a\n\t\t\tstatus of INSYNC. Changes generally propagate to all Route 53 name servers\n\t\t\tmanaging the hosted zone within 60 seconds. For more information, see GetChange.

\n

\n Limits on ChangeResourceRecordSets Requests\n

\n

For information about the limits on a ChangeResourceRecordSets request,\n\t\t\tsee Limits in the Amazon Route 53 Developer Guide.

", "smithy.api#examples": [ { "title": "To create a basic resource record set", @@ -3016,7 +3016,7 @@ "CallerReference": { "target": "com.amazonaws.route53#HealthCheckNonce", "traits": { - "smithy.api#documentation": "

A unique string that identifies the request and that allows you to retry a failed\n\t\t\t\tCreateHealthCheck request without the risk of creating two identical\n\t\t\thealth checks:

\n
    \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference and settings as a previous request, and if the\n\t\t\t\t\thealth check doesn't exist, Amazon Route 53 creates the health check. If the\n\t\t\t\t\thealth check does exist, Route 53 returns the settings for the existing health\n\t\t\t\t\tcheck.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference as a deleted health check, regardless of the\n\t\t\t\t\tsettings, Route 53 returns a HealthCheckAlreadyExists error.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference as an existing health check but with different\n\t\t\t\t\tsettings, Route 53 returns a HealthCheckAlreadyExists error.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with a unique\n\t\t\t\t\t\tCallerReference but settings identical to an existing health\n\t\t\t\t\tcheck, Route 53 creates the health check.

    \n
  • \n
", + "smithy.api#documentation": "

A unique string that identifies the request and that allows you to retry a failed\n\t\t\t\tCreateHealthCheck request without the risk of creating two identical\n\t\t\thealth checks:

\n
    \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference and settings as a previous request, and if the\n\t\t\t\t\thealth check doesn't exist, Amazon Route 53 creates the health check. If the\n\t\t\t\t\thealth check does exist, Route 53 returns the settings for the existing health\n\t\t\t\t\tcheck.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference as a deleted health check, regardless of the\n\t\t\t\t\tsettings, Route 53 returns a HealthCheckAlreadyExists error.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with the same\n\t\t\t\t\t\tCallerReference as an existing health check but with different\n\t\t\t\t\tsettings, Route 53 returns a HealthCheckAlreadyExists error.

    \n
  • \n
  • \n

    If you send a CreateHealthCheck request with a unique\n\t\t\t\t\t\tCallerReference but settings identical to an existing health\n\t\t\t\t\tcheck, Route 53 creates the health check.

    \n
  • \n
\n

Route 53 does not store the CallerReference for a deleted health check indefinitely. \n\t\t\tThe CallerReference for a deleted health check will be deleted after a number of days.

", "smithy.api#required": {} } }, @@ -3532,7 +3532,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates resource record sets in a specified hosted zone based on the settings in a\n\t\t\tspecified traffic policy version. In addition, CreateTrafficPolicyInstance\n\t\t\tassociates the resource record sets with a specified domain name (such as example.com)\n\t\t\tor subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for\n\t\t\tthe domain or subdomain name by using the resource record sets that\n\t\t\t\tCreateTrafficPolicyInstance created.

", + "smithy.api#documentation": "

Creates resource record sets in a specified hosted zone based on the settings in a\n\t\t\tspecified traffic policy version. In addition, CreateTrafficPolicyInstance\n\t\t\tassociates the resource record sets with a specified domain name (such as example.com)\n\t\t\tor subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for\n\t\t\tthe domain or subdomain name by using the resource record sets that\n\t\t\t\tCreateTrafficPolicyInstance created.

\n \n

After you submit a CreateTrafficPolicyInstance request, there's a\n\t\t\t\tbrief delay while Amazon Route 53 creates the resource record sets that are\n\t\t\t\tspecified in the traffic policy definition. \n\t\t\t\tUse GetTrafficPolicyInstance with the id of the new traffic policy instance to confirm that the CreateTrafficPolicyInstance\n\t\t\t\trequest completed successfully. For more information, see the\n\t\t\t\tState response element.

\n
", "smithy.api#http": { "method": "POST", "uri": "/2013-04-01/trafficpolicyinstance", @@ -4951,7 +4951,7 @@ "CountryCode": { "target": "com.amazonaws.route53#GeoLocationCountryCode", "traits": { - "smithy.api#documentation": "

For geolocation resource record sets, the two-letter code for a country.

\n

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1\n\t\t\t\talpha-2.

" + "smithy.api#documentation": "

For geolocation resource record sets, the two-letter code for a country.

\n

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1\n\t\t\t\talpha-2.

\n

Route 53 also supports the country code UA for Ukraine.

" } }, "SubdivisionCode": { @@ -5355,7 +5355,7 @@ "CountryCode": { "target": "com.amazonaws.route53#GeoLocationCountryCode", "traits": { - "smithy.api#documentation": "

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1\n\t\t\t\talpha-2.

", + "smithy.api#documentation": "

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1\n\t\t\t\talpha-2.

\n

Route 53 also supports the country code UA for Ukraine.

", "smithy.api#httpQuery": "countrycode" } }, @@ -6060,7 +6060,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a specified traffic policy instance.

\n \n

After you submit a CreateTrafficPolicyInstance or an\n\t\t\t\t\tUpdateTrafficPolicyInstance request, there's a brief delay while\n\t\t\t\tAmazon Route 53 creates the resource record sets that are specified in the traffic\n\t\t\t\tpolicy definition. For more information, see the State response\n\t\t\t\telement.

\n
\n \n

In the Route 53 console, traffic policy instances are known as policy\n\t\t\t\trecords.

\n
", + "smithy.api#documentation": "

Gets information about a specified traffic policy instance.

\n \n

\n\t\t\t\tUse GetTrafficPolicyInstance with the id of the new traffic policy instance to confirm that the \n\t\t\t\tCreateTrafficPolicyInstance or UpdateTrafficPolicyInstance request completed successfully. \n\t\t\t\tFor more information, see the State response\n\t\t\t\telement.

\n
\n \n

In the Route 53 console, traffic policy instances are known as policy\n\t\t\t\trecords.

\n
", "smithy.api#http": { "method": "GET", "uri": "/2013-04-01/trafficpolicyinstance/{Id}", @@ -6858,6 +6858,17 @@ "smithy.api#documentation": "

In the response to a ListHostedZonesByVPC request, the\n\t\t\t\tHostedZoneSummaries element contains one HostedZoneSummary\n\t\t\telement for each hosted zone that the specified Amazon VPC is associated with. Each\n\t\t\t\tHostedZoneSummary element contains the hosted zone name and ID, and\n\t\t\tinformation about who owns the hosted zone.

" } }, + "com.amazonaws.route53#HostedZoneType": { + "type": "enum", + "members": { + "PRIVATE_HOSTED_ZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PrivateHostedZone" + } + } + } + }, "com.amazonaws.route53#HostedZones": { "type": "list", "member": { @@ -7716,7 +7727,7 @@ "MaxItems": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of health checks that you want ListHealthChecks to\n\t\t\treturn in response to the current request. Amazon Route 53 returns a maximum of 100\n\t\t\titems. If you set MaxItems to a value greater than 100, Route 53 returns\n\t\t\tonly the first 100 health checks.

", + "smithy.api#documentation": "

The maximum number of health checks that you want ListHealthChecks to\n\t\t\treturn in response to the current request. Amazon Route 53 returns a maximum of 1000\n\t\t\titems. If you set MaxItems to a value greater than 1000, Route 53 returns\n\t\t\tonly the first 1000 health checks.

", "smithy.api#httpQuery": "maxitems" } } @@ -8029,6 +8040,13 @@ "smithy.api#documentation": "

If you're using reusable delegation sets and you want to list all of the hosted zones\n\t\t\tthat are associated with a reusable delegation set, specify the ID of that reusable\n\t\t\tdelegation set.

", "smithy.api#httpQuery": "delegationsetid" } + }, + "HostedZoneType": { + "target": "com.amazonaws.route53#HostedZoneType", + "traits": { + "smithy.api#documentation": "

\n\t\t\t(Optional) Specifies whether the hosted zone is private.\n\t\t

", + "smithy.api#httpQuery": "hostedzonetype" + } } }, "traits": { @@ -11317,7 +11335,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the resource record sets in a specified hosted zone that were created based on\n\t\t\tthe settings in a specified traffic policy version.

\n

When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS\n\t\t\tqueries for the root resource record set name (such as example.com) while it replaces\n\t\t\tone group of resource record sets with another. Route 53 performs the following\n\t\t\toperations:

\n
    \n
  1. \n

    Route 53 creates a new group of resource record sets based on the specified\n\t\t\t\t\ttraffic policy. This is true regardless of how significant the differences are\n\t\t\t\t\tbetween the existing resource record sets and the new resource record sets.\n\t\t\t\t

    \n
  2. \n
  3. \n

    When all of the new resource record sets have been created, Route 53 starts to\n\t\t\t\t\trespond to DNS queries for the root resource record set name (such as\n\t\t\t\t\texample.com) by using the new resource record sets.

    \n
  4. \n
  5. \n

    Route 53 deletes the old group of resource record sets that are associated\n\t\t\t\t\twith the root resource record set name.

    \n
  6. \n
", + "smithy.api#documentation": "\n

After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets \n\t\t\tthat are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of the updated traffic policy instance to confirm \n\t\t\tthat the \n\t\t\tUpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.

\n
\n

Updates the resource record sets in a specified hosted zone that were created based on\n\t\t\tthe settings in a specified traffic policy version.

\n

When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS\n\t\t\tqueries for the root resource record set name (such as example.com) while it replaces\n\t\t\tone group of resource record sets with another. Route 53 performs the following\n\t\t\toperations:

\n
    \n
  1. \n

    Route 53 creates a new group of resource record sets based on the specified\n\t\t\t\t\ttraffic policy. This is true regardless of how significant the differences are\n\t\t\t\t\tbetween the existing resource record sets and the new resource record sets.\n\t\t\t\t

    \n
  2. \n
  3. \n

    When all of the new resource record sets have been created, Route 53 starts to\n\t\t\t\t\trespond to DNS queries for the root resource record set name (such as\n\t\t\t\t\texample.com) by using the new resource record sets.

    \n
  4. \n
  5. \n

    Route 53 deletes the old group of resource record sets that are associated\n\t\t\t\t\twith the root resource record set name.

    \n
  6. \n
", "smithy.api#http": { "method": "POST", "uri": "/2013-04-01/trafficpolicyinstance/{Id}", diff --git a/models/sagemaker.json b/models/sagemaker.json index 76965dcdf1..d93f2c3762 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -371,6 +371,45 @@ } } }, + "com.amazonaws.sagemaker#AdditionalS3DataSource": { + "type": "structure", + "members": { + "S3DataType": { + "target": "com.amazonaws.sagemaker#AdditionalS3DataSourceDataType", + "traits": { + "smithy.api#documentation": "

The data type of the additional data source that you specify for use in inference or\n training.

", + "smithy.api#required": {} + } + }, + "S3Uri": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#documentation": "

The uniform resource identifier (URI) used to identify an additional data source used\n in inference or training.

", + "smithy.api#required": {} + } + }, + "CompressionType": { + "target": "com.amazonaws.sagemaker#CompressionType", + "traits": { + "smithy.api#documentation": "

The type of compression used for an additional data source used in inference or\n training. Specify None if your additional data source is not compressed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A data source used for training or inference that is in addition to the input dataset\n or model data.

" + } + }, + "com.amazonaws.sagemaker#AdditionalS3DataSourceDataType": { + "type": "enum", + "members": { + "S3OBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3Object" + } + } + } + }, "com.amazonaws.sagemaker#AgentVersion": { "type": "structure", "members": { @@ -4248,7 +4287,7 @@ "TimeSeriesForecastingSettings": { "target": "com.amazonaws.sagemaker#TimeSeriesForecastingSettings", "traits": { - "smithy.api#documentation": "

Time series forecast settings for the Canvas application.

" + "smithy.api#documentation": "

Time series forecast settings for the SageMaker Canvas application.

" } }, "ModelRegisterSettings": { @@ -4268,6 +4307,18 @@ "traits": { "smithy.api#documentation": "

The settings for connecting to an external data source with OAuth.

" } + }, + "KendraSettings": { + "target": "com.amazonaws.sagemaker#KendraSettings", + "traits": { + "smithy.api#documentation": "

The settings for document querying.

" + } + }, + "DirectDeploySettings": { + "target": "com.amazonaws.sagemaker#DirectDeploySettings", + "traits": { + "smithy.api#documentation": "

The model deployment settings for the SageMaker Canvas application.

" + } } }, "traits": { @@ -4293,7 +4344,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the type and size of the endpoint capacity to activate for a blue/green deployment, a rolling deployment, or a rollback strategy.\n You can specify your batches as either instance count or the overall percentage or your fleet.

\n

For a rollback strategy, if you don't specify the fields in this object, or if you set the Value to 100%, then SageMaker\n uses a blue/green rollback strategy and rolls all traffic back to the blue fleet.

" + "smithy.api#documentation": "

Specifies the type and size of the endpoint capacity to activate for a blue/green\n deployment, a rolling deployment, or a rollback strategy. You can specify your batches\n as either instance count or the overall percentage or your fleet.

\n

For a rollback strategy, if you don't specify the fields in this object, or if you set\n the Value to 100%, then SageMaker uses a blue/green rollback strategy and rolls\n all traffic back to the blue fleet.

" } }, "com.amazonaws.sagemaker#CapacitySizeType": { @@ -6183,7 +6234,7 @@ "ModelDataSource": { "target": "com.amazonaws.sagemaker#ModelDataSource", "traits": { - "smithy.api#documentation": "

Specifies the location of ML model data to deploy.

\n \n

Currently you cannot use ModelDataSource in conjunction with\n SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker\n Marketplace.

\n
" + "smithy.api#documentation": "

Specifies the location of ML model data to deploy.

\n \n

Currently you cannot use ModelDataSource in conjunction with SageMaker\n batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker\n Marketplace.

\n
" } } }, @@ -8056,7 +8107,7 @@ } ], "traits": { - "smithy.api#documentation": "

Create a new FeatureGroup. A FeatureGroup is a group of\n Features defined in the FeatureStore to describe a\n Record.

\n

The FeatureGroup defines the schema and features contained in the\n FeatureGroup. A FeatureGroup definition is composed of a list of\n Features, a RecordIdentifierFeatureName, an\n EventTimeFeatureName and configurations for its OnlineStore\n and OfflineStore. Check Amazon Web Services service\n quotas to see the FeatureGroups quota for your Amazon Web Services\n account.

\n \n

You must include at least one of OnlineStoreConfig and\n OfflineStoreConfig to create a FeatureGroup.

\n
" + "smithy.api#documentation": "

Create a new FeatureGroup. A FeatureGroup is a group of\n Features defined in the FeatureStore to describe a\n Record.

\n

The FeatureGroup defines the schema and features contained in the\n FeatureGroup. A FeatureGroup definition is composed of a list\n of Features, a RecordIdentifierFeatureName, an\n EventTimeFeatureName and configurations for its OnlineStore\n and OfflineStore. Check Amazon Web Services service\n quotas to see the FeatureGroups quota for your Amazon Web Services\n account.

\n

Note that it can take approximately 10-15 minutes to provision an\n OnlineStore\n FeatureGroup with the InMemory\n StorageType.

\n \n

You must include at least one of OnlineStoreConfig and\n OfflineStoreConfig to create a FeatureGroup.

\n
" } }, "com.amazonaws.sagemaker#CreateFeatureGroupRequest": { @@ -12564,7 +12615,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delete the FeatureGroup and any data that was written to the\n OnlineStore of the FeatureGroup. Data cannot be accessed from\n the OnlineStore immediately after DeleteFeatureGroup is called.

\n

Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your\n OfflineStore are not deleted.

" + "smithy.api#documentation": "

Delete the FeatureGroup and any data that was written to the\n OnlineStore of the FeatureGroup. Data cannot be accessed from\n the OnlineStore immediately after DeleteFeatureGroup is called.

\n

Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your\n OfflineStore are not deleted.

\n

Note that it can take approximately 10-15 minutes to delete an OnlineStore\n FeatureGroup with the InMemory\n StorageType.

" } }, "com.amazonaws.sagemaker#DeleteFeatureGroupRequest": { @@ -16055,7 +16106,7 @@ "EndpointStatus": { "target": "com.amazonaws.sagemaker#EndpointStatus", "traits": { - "smithy.api#documentation": "

The status of the endpoint.

\n
    \n
  • \n

    \n OutOfService: Endpoint is not available to take incoming\n requests.

    \n
  • \n
  • \n

    \n Creating: CreateEndpoint is executing.

    \n
  • \n
  • \n

    \n Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

    \n
  • \n
  • \n

    \n SystemUpdating: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.

    \n
  • \n
  • \n

    \n RollingBack: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.

    \n
  • \n
  • \n

    \n InService: Endpoint is available to process incoming\n requests.

    \n
  • \n
  • \n

    \n Deleting: DeleteEndpoint is executing.

    \n
  • \n
  • \n

    \n Failed: Endpoint could not be created, updated, or re-scaled. Use\n the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a\n failed endpoint.

    \n
  • \n
  • \n

    \n UpdateRollbackFailed: Both the rolling deployment and auto-rollback failed. Your endpoint\n is in service with a mix of the old and new endpoint configurations. For information about how to remedy\n this issue and restore the endpoint's status to InService, see\n Rolling Deployments.

    \n
  • \n
", + "smithy.api#documentation": "

The status of the endpoint.

\n
    \n
  • \n

    \n OutOfService: Endpoint is not available to take incoming\n requests.

    \n
  • \n
  • \n

    \n Creating: CreateEndpoint is executing.

    \n
  • \n
  • \n

    \n Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

    \n
  • \n
  • \n

    \n SystemUpdating: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.

    \n
  • \n
  • \n

    \n RollingBack: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.

    \n
  • \n
  • \n

    \n InService: Endpoint is available to process incoming\n requests.

    \n
  • \n
  • \n

    \n Deleting: DeleteEndpoint is executing.

    \n
  • \n
  • \n

    \n Failed: Endpoint could not be created, updated, or re-scaled. Use\n the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a\n failed endpoint.

    \n
  • \n
  • \n

    \n UpdateRollbackFailed: Both the rolling deployment and\n auto-rollback failed. Your endpoint is in service with a mix of the old and new\n endpoint configurations. For information about how to remedy this issue and\n restore the endpoint's status to InService, see Rolling\n Deployments.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -21642,6 +21693,20 @@ } } }, + "com.amazonaws.sagemaker#DirectDeploySettings": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sagemaker#FeatureStatus", + "traits": { + "smithy.api#documentation": "

Describes whether model deployment permissions are enabled or disabled in the Canvas application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The model deployment settings for the SageMaker Canvas application.

\n \n

In order to enable model deployment for Canvas, the SageMaker Domain's or user profile's Amazon Web Services IAM\n execution role must have the AmazonSageMakerCanvasDirectDeployAccess policy attached. You can also\n turn on model deployment permissions through the SageMaker Domain's or user profile's settings in the SageMaker console.

\n
" + } + }, "com.amazonaws.sagemaker#DirectInternetAccess": { "type": "enum", "members": { @@ -26776,7 +26841,7 @@ "target": "com.amazonaws.sagemaker#TrainingInstanceCount", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of instances of the type specified by InstanceType. Choose an\n instance count larger than 1 for distributed training algorithms. See Step 2:\n Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more information.

", + "smithy.api#documentation": "

The number of instances of the type specified by InstanceType. Choose an\n instance count larger than 1 for distributed training algorithms. See Step 2:\n Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more\n information.

", "smithy.api#required": {} } }, @@ -27457,7 +27522,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Amazon SageMaker Canvas app setting where you configure OAuth for connecting to an external\n data source, such as Snowflake.

" + "smithy.api#documentation": "

The Amazon SageMaker Canvas application setting where you configure OAuth for connecting to an external\n data source, such as Snowflake.

" } }, "com.amazonaws.sagemaker#IdentityProviderOAuthSettings": { @@ -29496,6 +29561,20 @@ } } }, + "com.amazonaws.sagemaker#KendraSettings": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sagemaker#FeatureStatus", + "traits": { + "smithy.api#documentation": "

Describes whether the document querying feature is enabled\n or disabled in the Canvas application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon SageMaker Canvas application setting where you configure\n document querying.

" + } + }, "com.amazonaws.sagemaker#KernelDisplayName": { "type": "string", "traits": { @@ -38931,7 +39010,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the location of ML model data to deploy. If specified, you must specify\n one and only one of the available data sources.

" + "smithy.api#documentation": "

Specifies the location of ML model data to deploy. If specified, you must specify one\n and only one of the available data sources.

" } }, "com.amazonaws.sagemaker#ModelDeployConfig": { @@ -39568,6 +39647,12 @@ "traits": { "smithy.api#documentation": "

The name of a pre-trained machine learning benchmarked by \n Amazon SageMaker Inference Recommender model that matches your model. \n You can find a list of benchmarked models by calling ListModelMetadata.

" } + }, + "AdditionalS3DataSource": { + "target": "com.amazonaws.sagemaker#AdditionalS3DataSource", + "traits": { + "smithy.api#documentation": "

The additional data source that is used during inference in the Docker container for\n your model package.

" + } } }, "traits": { @@ -42665,7 +42750,7 @@ "CompressionType": { "target": "com.amazonaws.sagemaker#OutputCompressionType", "traits": { - "smithy.api#documentation": "

The model output compression type. Select None to output an uncompressed model, recommended for large model outputs. Defaults to gzip.

" + "smithy.api#documentation": "

The model output compression type. Select None to output an uncompressed\n model, recommended for large model outputs. Defaults to gzip.

" } } }, @@ -48780,7 +48865,7 @@ "WaitIntervalInSeconds": { "target": "com.amazonaws.sagemaker#WaitIntervalInSeconds", "traits": { - "smithy.api#documentation": "

The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet.

", + "smithy.api#documentation": "

The length of the baking period, during which SageMaker monitors alarms for each batch on\n the new fleet.

", "smithy.api#required": {} } }, @@ -48793,7 +48878,7 @@ "RollbackMaximumBatchSize": { "target": "com.amazonaws.sagemaker#CapacitySize", "traits": { - "smithy.api#documentation": "

Batch size for rollback to the old endpoint fleet. Each rolling step to provision\n capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the new\n endpoint fleet. If this field is absent, the default value will be set to 100% of total\n capacity which means to bring up the whole capacity of the old fleet at once during rollback.

" + "smithy.api#documentation": "

Batch size for rollback to the old endpoint fleet. Each rolling step to provision\n capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the\n new endpoint fleet. If this field is absent, the default value will be set to 100% of\n total capacity which means to bring up the whole capacity of the old fleet at once\n during rollback.

" } } }, @@ -48984,7 +49069,7 @@ "CompressionType": { "target": "com.amazonaws.sagemaker#ModelCompressionType", "traits": { - "smithy.api#documentation": "

Specifies how the ML model data is prepared.

\n

If you choose Gzip and choose S3Object as the value of\n S3DataType, S3Uri identifies an object that is a\n gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object\n during model deployment.

\n

If you choose None and chooose S3Object as the value of\n S3DataType, S3Uri identifies an object that represents an\n uncompressed ML model to deploy.

\n

If you choose None and choose S3Prefix as the value of\n S3DataType, S3Uri identifies a key name prefix, under which\n all objects represents the uncompressed ML model to deploy.

\n

If you choose None, then SageMaker will follow rules below when creating model data files\n under /opt/ml/model directory for use by your inference code:

\n
    \n
  • \n

    If you choose S3Object as the value of S3DataType,\n then SageMaker will split the key of the S3 object referenced by S3Uri by\n slash (/), and use the last part as the filename of the file holding the content\n of the S3 object.

    \n
  • \n
  • \n

    If you choose S3Prefix as the value of S3DataType,\n then for each S3 object under the key name pefix referenced by S3Uri,\n SageMaker will trim its key by the prefix, and use the remainder as the path\n (relative to /opt/ml/model) of the file holding the content of the\n S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as\n directory names and the last part as filename of the file holding the content of\n the S3 object.

    \n
  • \n
  • \n

    Do not use any of the following as file names or directory names:

    \n
      \n
    • \n

      An empty or blank string

      \n
    • \n
    • \n

      A string which contains null bytes

      \n
    • \n
    • \n

      A string longer than 255 bytes

      \n
    • \n
    • \n

      A single dot (.)

      \n
    • \n
    • \n

      A double dot (..)

      \n
    • \n
    \n
  • \n
  • \n

    Ambiguous file names will result in model deployment failure. For example,\n if your uncompressed ML model consists of two S3 objects\n s3://mybucket/model/weights and s3://mybucket/model/weights/part1\n and you specify s3://mybucket/model/ as the value of S3Uri and\n S3Prefix as the value of S3DataType, then it will result in name\n clash between /opt/ml/model/weights (a regular file) and\n /opt/ml/model/weights/ (a directory).

    \n
  • \n
  • \n

    Do not organize the model artifacts in\n S3 console using folders.\n When you create a folder in S3 console, S3 creates a 0-byte object with a key set to the\n folder name you provide. They key of the 0-byte object ends with a slash (/) which violates\n SageMaker restrictions on model artifact file names, leading to model deployment failure.\n

    \n
  • \n
", + "smithy.api#documentation": "

Specifies how the ML model data is prepared.

\n

If you choose Gzip and choose S3Object as the value of\n S3DataType, S3Uri identifies an object that is a\n gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during\n model deployment.

\n

If you choose None and choose S3Object as the value of\n S3DataType, S3Uri identifies an object that represents an\n uncompressed ML model to deploy.

\n

If you choose None and choose S3Prefix as the value of\n S3DataType, S3Uri identifies a key name prefix, under\n which all objects represent the uncompressed ML model to deploy.

\n

If you choose None, then SageMaker will follow the rules below when creating model data files\n under the /opt/ml/model directory for use by your inference code:

\n
    \n
  • \n

    If you choose S3Object as the value of S3DataType,\n then SageMaker will split the key of the S3 object referenced by S3Uri\n by slash (/), and use the last part as the filename of the file holding the\n content of the S3 object.

    \n
  • \n
  • \n

    If you choose S3Prefix as the value of S3DataType,\n then for each S3 object under the key name prefix referenced by\n S3Uri, SageMaker will trim its key by the prefix, and use the\n remainder as the path (relative to /opt/ml/model) of the file\n holding the content of the S3 object. SageMaker will split the remainder by slash\n (/), using intermediate parts as directory names and the last part as the filename\n of the file holding the content of the S3 object.

    \n
  • \n
  • \n

    Do not use any of the following as file names or directory names:

    \n
      \n
    • \n

      An empty or blank string

      \n
    • \n
    • \n

      A string which contains null bytes

      \n
    • \n
    • \n

      A string longer than 255 bytes

      \n
    • \n
    • \n

      A single dot (.)

      \n
    • \n
    • \n

      A double dot (..)

      \n
    • \n
    \n
  • \n
  • \n

    Ambiguous file names will result in model deployment failure. For example, if\n your uncompressed ML model consists of two S3 objects\n s3://mybucket/model/weights and\n s3://mybucket/model/weights/part1 and you specify\n s3://mybucket/model/ as the value of S3Uri and\n S3Prefix as the value of S3DataType, then it will\n result in name clash between /opt/ml/model/weights (a regular file)\n and /opt/ml/model/weights/ (a directory).

    \n
  • \n
  • \n

    Do not organize the model artifacts in the S3 console using\n folders. When you create a folder in the S3 console, S3 creates a 0-byte\n object with a key set to the folder name you provide. The key of the 0-byte\n object ends with a slash (/) which violates SageMaker restrictions on model artifact\n file names, leading to model deployment failure.

    \n
  • \n
", "smithy.api#required": {} } } @@ -54835,7 +54920,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 10 + "max": 100 } } }, @@ -54870,7 +54955,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 48 + "max": 100 } } }, @@ -55852,6 +55937,12 @@ "traits": { "smithy.api#documentation": "

A list of the metrics that the algorithm emits that can be used as the objective\n metric in a hyperparameter tuning job.

" } + }, + "AdditionalS3DataSource": { + "target": "com.amazonaws.sagemaker#AdditionalS3DataSource", + "traits": { + "smithy.api#documentation": "

The additional data source used during the training job.

" + } } }, "traits": { diff --git a/models/securityhub.json b/models/securityhub.json index 17bc5221df..ba5b8987ae 100644 --- a/models/securityhub.json +++ b/models/securityhub.json @@ -4877,6 +4877,297 @@ "smithy.api#documentation": "

Contains the cross-origin resource sharing (CORS) configuration for the API. CORS is\n only supported for HTTP APIs.

" } }, + "com.amazonaws.securityhub#AwsDmsEndpointDetails": { + "type": "structure", + "members": { + "CertificateArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) for the SSL certificate that encrypts connections between the DMS endpoint and the \n replication instance.\n

" + } + }, + "DatabaseName": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The name of the endpoint database.

" + } + }, + "EndpointArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the endpoint.\n

" + } + }, + "EndpointIdentifier": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The database endpoint identifier.\n

" + } + }, + "EndpointType": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The type of endpoint. Valid values are source and target.\n

" + } + }, + "EngineName": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The type of engine for the endpoint, depending on the EndpointType value.\n

" + } + }, + "ExternalId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n A value that can be used for cross-account validation.\n

" + } + }, + "ExtraConnectionAttributes": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Additional attributes associated with the connection.\n

" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n An KMS key identifier that is used to encrypt the connection parameters for the endpoint.\n If you don't specify a value for the KmsKeyId parameter, then DMS uses your default \n encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your \n Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.

" + } + }, + "Port": { + "target": "com.amazonaws.securityhub#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The port used to access the endpoint.\n

" + } + }, + "ServerName": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The name of the server where the endpoint database resides.

" + } + }, + "SslMode": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The SSL mode used to connect to the endpoint. The default is none.

" + } + }, + "Username": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The user name to be used to log in to the endpoint database.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Database Migration Service (DMS) endpoint. An endpoint provides connection, data \n store type, and location information about your data store.\n

" + } + }, + "com.amazonaws.securityhub#AwsDmsReplicationInstanceDetails": { + "type": "structure", + "members": { + "AllocatedStorage": { + "target": "com.amazonaws.securityhub#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The amount of storage (in gigabytes) that is allocated for the replication instance.\n

" + } + }, + "AutoMinorVersionUpgrade": { + "target": "com.amazonaws.securityhub#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance \n window.\n

" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Availability Zone that the replication instance is created in. The default value is a random, system-chosen \n Availability Zone in the endpoint's Amazon Web Services Region, such as us-east-1d.

" + } + }, + "EngineVersion": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The engine version number of the replication instance. If an engine version number is not specified when a \n replication instance is created, the default is the latest engine version available.\n

" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n An KMS key identifier that is used to encrypt the data on the replication instance. If you don't \n specify a value for the KmsKeyId parameter, DMS uses your default encryption key. \n KMS creates the default encryption key for your Amazon Web Services account. Your \n Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.

" + } + }, + "MultiAZ": { + "target": "com.amazonaws.securityhub#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

\n Specifies whether the replication instance is deployed across multiple Availability Zones (AZs). You can't set the \n AvailabilityZone parameter if the MultiAZ parameter is set to true.

" + } + }, + "PreferredMaintenanceWindow": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The maintenance window times for the replication instance. Upgrades to the replication instance are performed during \n this time.

" + } + }, + "PubliclyAccessible": { + "target": "com.amazonaws.securityhub#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

\n Specifies the accessibility options for the replication instance. A value of true represents an instance \n with a public IP address. A value of false represents an instance with a private IP address. The default \n value is true.

" + } + }, + "ReplicationInstanceClass": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The compute and memory capacity of the replication instance as defined for the specified replication instance class.

" + } + }, + "ReplicationInstanceIdentifier": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The replication instance identifier.

" + } + }, + "ReplicationSubnetGroup": { + "target": "com.amazonaws.securityhub#AwsDmsReplicationInstanceReplicationSubnetGroupDetails", + "traits": { + "smithy.api#documentation": "

\n The subnet group for the replication instance.

" + } + }, + "VpcSecurityGroups": { + "target": "com.amazonaws.securityhub#AwsDmsReplicationInstanceVpcSecurityGroupsList", + "traits": { + "smithy.api#documentation": "

\n The virtual private cloud (VPC) security group for the replication instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Database Migration Service (DMS) replication instance. DMS uses a replication instance to connect to \n your source data store, read the source data, and format the data for consumption by the target data store.\n

" + } + }, + "com.amazonaws.securityhub#AwsDmsReplicationInstanceReplicationSubnetGroupDetails": { + "type": "structure", + "members": { + "ReplicationSubnetGroupIdentifier": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The identifier of the replication subnet group.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about the replication subnet group.

" + } + }, + "com.amazonaws.securityhub#AwsDmsReplicationInstanceVpcSecurityGroupsDetails": { + "type": "structure", + "members": { + "VpcSecurityGroupId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The identifier of the VPC security group that’s associated with the replication instance.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about the virtual private cloud (VPC) security group that’s associated with the replication instance.

" + } + }, + "com.amazonaws.securityhub#AwsDmsReplicationInstanceVpcSecurityGroupsList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsDmsReplicationInstanceVpcSecurityGroupsDetails" + } + }, + "com.amazonaws.securityhub#AwsDmsReplicationTaskDetails": { + "type": "structure", + "members": { + "CdcStartPosition": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Indicates when you want a change data capture (CDC) operation to start. CdcStartPosition or \n CdcStartTime specifies when you want a CDC operation to start. Only a value for one of these fields \n is included.

" + } + }, + "CdcStartTime": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Indicates the start time for a CDC operation. CdcStartPosition or CdcStartTime specifies \n when you want a CDC operation to start. Only a value for one of these fields is included.

" + } + }, + "CdcStopPosition": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Indicates when you want a CDC operation to stop. The value can be either server time or commit time.

" + } + }, + "MigrationType": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The migration type.\n

" + } + }, + "Id": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The identifier of the replication task.

" + } + }, + "ResourceIdentifier": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n A display name for the resource identifier at the end of the EndpointArn response parameter. \n If you don't specify a ResourceIdentifier value, DMS generates a default identifier value for \n the end of EndpointArn.

" + } + }, + "ReplicationInstanceArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of a replication instance.\n

" + } + }, + "ReplicationTaskIdentifier": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The user-defined replication task identifier or name.

" + } + }, + "ReplicationTaskSettings": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The settings for the replication task.

" + } + }, + "SourceEndpointArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ARN of the source endpoint.

" + } + }, + "TableMappings": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The table mappings for the replication task, in JSON format.

" + } + }, + "TargetEndpointArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ARN of the target endpoint.

" + } + }, + "TaskData": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Supplemental information that the task requires to migrate the data for certain source and target endpoints.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Database Migration Service (DMS) replication task. A replication task moves a set of data from the \n source endpoint to the target endpoint.

" + } + }, "com.amazonaws.securityhub#AwsDynamoDbTableAttributeDefinition": { "type": "structure", "members": { @@ -10015,6 +10306,12 @@ "traits": { "smithy.api#documentation": "

The data volume definitions for the task.

" } + }, + "Status": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The status of the task definition.\n

" + } } }, "traits": { @@ -11797,55 +12094,251 @@ "smithy.api#documentation": "

\n A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for \n schemas. They collect and organize schemas so that your schemas are in logical groups. \n

" } }, - "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesCloudTrailDetails": { + "com.amazonaws.securityhub#AwsEventsEndpointDetails": { "type": "structure", "members": { - "Status": { + "Arn": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n Specifies whether CloudTrail is activated as a data source for the detector. \n

" + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the endpoint.\n

" } - } - }, - "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of CloudTrail as a data source for the detector. \n

" - } - }, - "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesDetails": { - "type": "structure", - "members": { - "CloudTrail": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesCloudTrailDetails", + }, + "Description": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of CloudTrail as a data source for the detector. \n

" + "smithy.api#documentation": "

\n A description of the endpoint.\n

" } }, - "DnsLogs": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesDnsLogsDetails", + "EndpointId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of DNS logs as a data source for the detector.\n

" + "smithy.api#documentation": "

\n The URL subdomain of the endpoint. For example, if EndpointUrl is \n https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.

" } }, - "FlowLogs": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesFlowLogsDetails", + "EndpointUrl": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of VPC Flow Logs as a data source for the detector.\n

" + "smithy.api#documentation": "

\n The URL of the endpoint.

" } }, - "Kubernetes": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesKubernetesDetails", + "EventBuses": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointEventBusesList", "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of Kubernetes data sources for the detector.\n

" + "smithy.api#documentation": "

\n The event buses being used by the endpoint.

" } }, - "MalwareProtection": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesMalwareProtectionDetails", + "Name": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n An object that contains information on the status of Malware Protection as a data source for the detector.\n

" + "smithy.api#documentation": "

\n The name of the endpoint.

" } }, - "S3Logs": { - "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesS3LogsDetails", + "ReplicationConfig": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointReplicationConfigDetails", + "traits": { + "smithy.api#documentation": "

\n Whether event replication was enabled or disabled for this endpoint. The default state is ENABLED, which \n means you must supply a RoleArn. If you don't have a RoleArn or you don't want event \n replication enabled, set the state to DISABLED.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ARN of the role used by event replication for the endpoint.

" + } + }, + "RoutingConfig": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigDetails", + "traits": { + "smithy.api#documentation": "

\n The routing configuration of the endpoint.

" + } + }, + "State": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The current state of the endpoint.

" + } + }, + "StateReason": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The reason the endpoint is in its current state.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon EventBridge global endpoint. The endpoint can improve your application’s \n availability by making it Regional-fault tolerant.\n

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointEventBusesDetails": { + "type": "structure", + "members": { + "EventBusArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the event bus that the endpoint is associated with.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about the Amazon EventBridge event buses that the endpoint is associated with.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointEventBusesList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointEventBusesDetails" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointReplicationConfigDetails": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The state of event replication.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Indicates whether replication is enabled or disabled for the endpoint. If enabled, the endpoint can replicate all \n events to a secondary Amazon Web Services Region.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigDetails": { + "type": "structure", + "members": { + "FailoverConfig": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigDetails", + "traits": { + "smithy.api#documentation": "

\n The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about the routing configuration of the endpoint.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigDetails": { + "type": "structure", + "members": { + "Primary": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails", + "traits": { + "smithy.api#documentation": "

\n The main Region of the endpoint.

" + } + }, + "Secondary": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails", + "traits": { + "smithy.api#documentation": "

\n The Region that events are routed to when failover is triggered or event replication is enabled.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigPrimaryDetails": { + "type": "structure", + "members": { + "HealthCheck": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the health check used by the endpoint to determine whether failover is triggered.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about the primary Amazon Web Services Region of the endpoint.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEndpointRoutingConfigFailoverConfigSecondaryDetails": { + "type": "structure", + "members": { + "Route": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Defines the secondary Region.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The Amazon Web Services Region that events are routed to when failover is triggered or event replication is enabled.

" + } + }, + "com.amazonaws.securityhub#AwsEventsEventbusDetails": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the account permitted to write events to the current account.

" + } + }, + "Name": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The name of the event bus.

" + } + }, + "Policy": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The policy that enables the external account to send events to your account.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon EventBridge event bus. An event bus is a router that receives events and delivers \n them to zero or more destinations, or targets. This can be a custom event bus, which you can use to receive events \n from your custom applications and services, or a partner event bus, which can be matched to a partner event \n source.

" + } + }, + "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesCloudTrailDetails": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n Specifies whether CloudTrail is activated as a data source for the detector. \n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of CloudTrail as a data source for the detector. \n

" + } + }, + "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesDetails": { + "type": "structure", + "members": { + "CloudTrail": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesCloudTrailDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of CloudTrail as a data source for the detector. \n

" + } + }, + "DnsLogs": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesDnsLogsDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of DNS logs as a data source for the detector.\n

" + } + }, + "FlowLogs": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesFlowLogsDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of VPC Flow Logs as a data source for the detector.\n

" + } + }, + "Kubernetes": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesKubernetesDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of Kubernetes data sources for the detector.\n

" + } + }, + "MalwareProtection": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesMalwareProtectionDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information on the status of Malware Protection as a data source for the detector.\n

" + } + }, + "S3Logs": { + "target": "com.amazonaws.securityhub#AwsGuardDutyDetectorDataSourcesS3LogsDetails", "traits": { "smithy.api#documentation": "

\n An object that contains information on the status of S3 Data event logs as a data source for the detector.\n

" } @@ -12982,178 +13475,398 @@ "PackageType": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The type of deployment package that's used to deploy the function code to Lambda. Set to Image for a container image and Zip for a .zip file archive.\n

" + "smithy.api#documentation": "

The type of deployment package that's used to deploy the function code to Lambda. Set to Image for a container image and Zip for a .zip file archive.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a Lambda function's configuration.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionEnvironment": { + "type": "structure", + "members": { + "Variables": { + "target": "com.amazonaws.securityhub#FieldMap", + "traits": { + "smithy.api#documentation": "

Environment variable key-value pairs.

" + } + }, + "Error": { + "target": "com.amazonaws.securityhub#AwsLambdaFunctionEnvironmentError", + "traits": { + "smithy.api#documentation": "

An AwsLambdaFunctionEnvironmentError object.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A function's environment variable settings.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionEnvironmentError": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The error code.

" + } + }, + "Message": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The error message.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Error messages for environment variables that could not be applied.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionLayer": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The ARN of the function layer.

" + } + }, + "CodeSize": { + "target": "com.amazonaws.securityhub#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The size of the layer archive in bytes.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A Lambda layer.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionLayerList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsLambdaFunctionLayer" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionTracingConfig": { + "type": "structure", + "members": { + "Mode": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The tracing mode.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The function's X-Ray tracing configuration.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaFunctionVpcConfig": { + "type": "structure", + "members": { + "SecurityGroupIds": { + "target": "com.amazonaws.securityhub#NonEmptyStringList", + "traits": { + "smithy.api#documentation": "

A list of VPC security groups IDs.

" + } + }, + "SubnetIds": { + "target": "com.amazonaws.securityhub#NonEmptyStringList", + "traits": { + "smithy.api#documentation": "

A list of VPC subnet IDs.

" + } + }, + "VpcId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The ID of the VPC.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The VPC security groups and subnets that are attached to a Lambda function.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaLayerVersionDetails": { + "type": "structure", + "members": { + "Version": { + "target": "com.amazonaws.securityhub#AwsLambdaLayerVersionNumber", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The version number.

" + } + }, + "CompatibleRuntimes": { + "target": "com.amazonaws.securityhub#NonEmptyStringList", + "traits": { + "smithy.api#documentation": "

The layer's compatible runtimes. Maximum number of five items.

\n

Valid values: nodejs10.x | nodejs12.x | java8 |\n java11 | python2.7 | python3.6 |\n python3.7 | python3.8 | dotnetcore1.0 |\n dotnetcore2.1 | go1.x | ruby2.5 |\n provided\n

" + } + }, + "CreatedDate": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

Indicates when the version was created.

\n

Uses the date-time format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces, and date and time should be separated by T. For example,\n 2020-03-22T13:22:13.933Z.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a Lambda layer version.

" + } + }, + "com.amazonaws.securityhub#AwsLambdaLayerVersionNumber": { + "type": "long", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.securityhub#AwsMountPoint": { + "type": "structure", + "members": { + "SourceVolume": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The name of the volume to mount. Must be a volume name referenced in the name parameter \nof the task definition volume.\n

" + } + }, + "ContainerPath": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The path on the container to mount the host volume at.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details for a volume mount point that's used in a container definition.\n

" + } + }, + "com.amazonaws.securityhub#AwsMountPointList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsMountPoint" + } + }, + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationDetails": { + "type": "structure", + "members": { + "Sasl": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details for client authentication using SASL.

" + } + }, + "Unauthenticated": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationUnauthenticatedDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details for allowing no client authentication.

" + } + }, + "Tls": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationTlsDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details for client authentication using TLS.

" } } }, "traits": { - "smithy.api#documentation": "

Details about an Lambda function's configuration.

" + "smithy.api#documentation": "

\n Provides details about different modes of client authentication.

" } }, - "com.amazonaws.securityhub#AwsLambdaFunctionEnvironment": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslDetails": { "type": "structure", "members": { - "Variables": { - "target": "com.amazonaws.securityhub#FieldMap", + "Iam": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslIamDetails", "traits": { - "smithy.api#documentation": "

Environment variable key-value pairs.

" + "smithy.api#documentation": "

\n Provides details for SASL client authentication using IAM.

" } }, - "Error": { - "target": "com.amazonaws.securityhub#AwsLambdaFunctionEnvironmentError", + "Scram": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslScramDetails", "traits": { - "smithy.api#documentation": "

An AwsLambdaFunctionEnvironmentError object.

" + "smithy.api#documentation": "

\n Details for SASL client authentication using SCRAM.

" } } }, "traits": { - "smithy.api#documentation": "

A function's environment variable settings.

" + "smithy.api#documentation": "

\n Provides details for client authentication using SASL.

" } }, - "com.amazonaws.securityhub#AwsLambdaFunctionEnvironmentError": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslIamDetails": { "type": "structure", "members": { - "ErrorCode": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Enabled": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

The error code.

" + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates whether SASL/IAM authentication is enabled or not.

" } - }, - "Message": { - "target": "com.amazonaws.securityhub#NonEmptyString", + } + }, + "traits": { + "smithy.api#documentation": "

\n Details for SASL/IAM client authentication.

" + } + }, + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationSaslScramDetails": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

The error message.

" + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates whether SASL/SCRAM authentication is enabled or not.

" } } }, "traits": { - "smithy.api#documentation": "

Error messages for environment variables that could not be applied.

" + "smithy.api#documentation": "

\n Details for SASL/SCRAM client authentication.

" } }, - "com.amazonaws.securityhub#AwsLambdaFunctionLayer": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationTlsDetails": { "type": "structure", "members": { - "Arn": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "CertificateAuthorityArnList": { + "target": "com.amazonaws.securityhub#StringList", "traits": { - "smithy.api#documentation": "

The ARN of the function layer.

" + "smithy.api#documentation": "

\n List of Amazon Web Services Private CA Amazon Resource Names (ARNs). Amazon Web Services Private CA enables creation of \nprivate certificate authority (CA) hierarchies, including root and subordinate CAs, without the investment and maintenance costs \nof operating an on-premises CA.

" } }, - "CodeSize": { - "target": "com.amazonaws.securityhub#Integer", + "Enabled": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The size of the layer archive in bytes.

" + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates whether TLS authentication is enabled or not.

" } } }, "traits": { - "smithy.api#documentation": "

An Lambda layer.

" - } - }, - "com.amazonaws.securityhub#AwsLambdaFunctionLayerList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsLambdaFunctionLayer" + "smithy.api#documentation": "

\n Provides details for client authentication using TLS.

" } }, - "com.amazonaws.securityhub#AwsLambdaFunctionTracingConfig": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationUnauthenticatedDetails": { "type": "structure", "members": { - "Mode": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Enabled": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

The tracing mode.

" + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates whether unauthenticated client access is allowed.

" } } }, "traits": { - "smithy.api#documentation": "

The function's X-Ray tracing configuration.

" + "smithy.api#documentation": "

\n Provides details for allowing no client authentication.\n

" } }, - "com.amazonaws.securityhub#AwsLambdaFunctionVpcConfig": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoDetails": { "type": "structure", "members": { - "SecurityGroupIds": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "EncryptionInfo": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoDetails", "traits": { - "smithy.api#documentation": "

A list of VPC security groups IDs.

" + "smithy.api#documentation": "

\n Includes encryption-related information, such as the KMS key used for encrypting data at rest and \nwhether you want Amazon MSK to encrypt your data in transit.

" } }, - "SubnetIds": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "CurrentVersion": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A list of VPC subnet IDs.

" + "smithy.api#documentation": "

\n The current version of the MSK cluster.

" } }, - "VpcId": { + "NumberOfBrokerNodes": { + "target": "com.amazonaws.securityhub#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The number of broker nodes in the cluster.

" + } + }, + "ClusterName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The ID of the VPC.

" + "smithy.api#documentation": "

\n The name of the cluster.

" + } + }, + "ClientAuthentication": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoClientAuthenticationDetails", + "traits": { + "smithy.api#documentation": "

\n Provides information for different modes of client authentication.

" } } }, "traits": { - "smithy.api#documentation": "

The VPC security groups and subnets that are attached to a Lambda function.

" + "smithy.api#documentation": "

\n Provides details about an Amazon MSK cluster.

" } }, - "com.amazonaws.securityhub#AwsLambdaLayerVersionDetails": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoDetails": { "type": "structure", "members": { - "Version": { - "target": "com.amazonaws.securityhub#AwsLambdaLayerVersionNumber", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The version number.

" - } - }, - "CompatibleRuntimes": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "EncryptionInTransit": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoEncryptionInTransitDetails", "traits": { - "smithy.api#documentation": "

The layer's compatible runtimes. Maximum number of five items.

\n

Valid values: nodejs10.x | nodejs12.x | java8 |\n java11 | python2.7 | python3.6 |\n python3.7 | python3.8 | dotnetcore1.0 |\n dotnetcore2.1 | go1.x | ruby2.5 |\n provided\n

" + "smithy.api#documentation": "

\n The settings for encrypting data in transit.

" } }, - "CreatedDate": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "EncryptionAtRest": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoEncryptionAtRestDetails", "traits": { - "smithy.api#documentation": "

Indicates when the version was created.

\n

Uses the date-time format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces, and date and time should be separated by T. For example,\n 2020-03-22T13:22:13.933Z.

" + "smithy.api#documentation": "

\n The data-volume encryption details. You can't update encryption at rest settings for existing clusters.

" } } }, "traits": { - "smithy.api#documentation": "

Details about a Lambda layer version.

" + "smithy.api#documentation": "

\n Includes encryption-related information, such as the KMS key used for encrypting data at rest and \nwhether you want MSK to encrypt your data in transit.

" } }, - "com.amazonaws.securityhub#AwsLambdaLayerVersionNumber": { - "type": "long", + "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoEncryptionAtRestDetails": { + "type": "structure", + "members": { + "DataVolumeKMSKeyId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the KMS key for encrypting data at rest. If you don't specify a \nKMS key, MSK creates one for you and uses it.

" + } + } + }, "traits": { - "smithy.api#default": 0 + "smithy.api#documentation": "

\n The data-volume encryption details. You can't update encryption at rest settings for existing clusters.

" } }, - "com.amazonaws.securityhub#AwsMountPoint": { + "com.amazonaws.securityhub#AwsMskClusterClusterInfoEncryptionInfoEncryptionInTransitDetails": { "type": "structure", "members": { - "SourceVolume": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "InCluster": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

The name of the volume to mount. Must be a volume name referenced in the name parameter \nof task definition volume.\n

" + "smithy.api#default": false, + "smithy.api#documentation": "

\n When set to true, it indicates that data communication among the broker nodes of the cluster is \n encrypted. When set to false, the communication happens in plain text. The default value is \n true.

" } }, - "ContainerPath": { + "ClientBroker": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The path on the container to mount the host volume at.\n

" + "smithy.api#documentation": "

\n Indicates the encryption setting for data in transit between clients and brokers.

" } } }, "traits": { - "smithy.api#documentation": "

Details for a volume mount point that's used in a container definition.\n

" + "smithy.api#documentation": "

\n The settings for encrypting data in transit.

" } }, - "com.amazonaws.securityhub#AwsMountPointList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsMountPoint" + "com.amazonaws.securityhub#AwsMskClusterDetails": { + "type": "structure", + "members": { + "ClusterInfo": { + "target": "com.amazonaws.securityhub#AwsMskClusterClusterInfoDetails", + "traits": { + "smithy.api#documentation": "

\n Provides information about a cluster.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon Managed Streaming for Apache Kafka (Amazon MSK) cluster. \n
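To make the relationships between these new MSK members concrete, here is a small standalone Swift sketch that reads them as a simple security posture check (unauthenticated access disabled, TLS client authentication on, encryption in transit enabled). The types are hypothetical illustrations, not the generated Soto shapes.

```swift
// Standalone sketch (hypothetical types, not generated Soto code): a rough reading of
// the MSK cluster details documented above as a simple posture check.
struct MskClusterPosture {
    var unauthenticatedAccessEnabled: Bool   // ClientAuthentication.Unauthenticated.Enabled
    var tlsClientAuthEnabled: Bool           // ClientAuthentication.Tls.Enabled
    var inClusterEncryption: Bool            // EncryptionInfo.EncryptionInTransit.InCluster
    var clientBroker: String                 // e.g. "TLS", "TLS_PLAINTEXT", "PLAINTEXT"

    /// True when the cluster matches the stricter settings described in the docs:
    /// no unauthenticated access, TLS client auth, and encryption in transit.
    var looksHardened: Bool {
        !unauthenticatedAccessEnabled
            && tlsClientAuthEnabled
            && inClusterEncryption
            && clientBroker == "TLS"
    }
}

let posture = MskClusterPosture(
    unauthenticatedAccessEnabled: false,
    tlsClientAuthEnabled: true,
    inClusterEncryption: true,   // the model documents true as the default
    clientBroker: "TLS"
)
print(posture.looksHardened) // true
```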

" } }, "com.amazonaws.securityhub#AwsNetworkFirewallFirewallDetails": { @@ -14039,6 +14752,13 @@ "smithy.api#default": false, "smithy.api#documentation": "

Whether the mapping of IAM accounts to database accounts is enabled.

" } + }, + "AutoMinorVersionUpgrade": { + "target": "com.amazonaws.securityhub#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

\n Indicates if minor version upgrades are automatically applied to the cluster.

" + } } }, "traits": { @@ -16218,6 +16938,124 @@ "target": "com.amazonaws.securityhub#AwsRedshiftClusterVpcSecurityGroup" } }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneConfigDetails": { + "type": "structure", + "members": { + "Comment": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

Any comments that you include about the hosted zone.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n An object that contains an optional comment about your Amazon Route 53 hosted zone.

" + } + }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneDetails": { + "type": "structure", + "members": { + "HostedZone": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneObjectDetails", + "traits": { + "smithy.api#documentation": "

\n An object that contains information about the specified hosted zone.

" + } + }, + "Vpcs": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneVpcsList", + "traits": { + "smithy.api#documentation": "

\n An object that contains information about the Amazon Virtual Private Clouds (Amazon VPCs) that are associated with \nthe specified hosted zone.

" + } + }, + "NameServers": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneNameServersList", + "traits": { + "smithy.api#documentation": "

\n An object that contains a list of the authoritative name servers for a hosted zone or for a reusable delegation set.

" + } + }, + "QueryLoggingConfig": { + "target": "com.amazonaws.securityhub#AwsRoute53QueryLoggingConfigDetails", + "traits": { + "smithy.api#documentation": "

\n An array that contains one QueryLoggingConfig element for each DNS query logging configuration that is \nassociated with the current Amazon Web Services account.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about a specified Amazon Route 53 hosted zone, including the four name servers assigned to \nthe hosted zone. A hosted zone represents a collection of records that can be managed together, belonging to a single parent \ndomain name.

" + } + }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneNameServersList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#NonEmptyString" + } + }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneObjectDetails": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ID that Route 53 assigns to the hosted zone when you create it.

" + } + }, + "Name": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The name of the domain. For public hosted zones, this is the name that you have registered with your DNS registrar.

" + } + }, + "Config": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneConfigDetails", + "traits": { + "smithy.api#documentation": "

\n An object that includes the Comment element.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n An object that contains information about an Amazon Route 53 hosted zone.

" + } + }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneVpcDetails": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The identifier of an Amazon VPC.\n

" + } + }, + "Region": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The Amazon Web Services Region that an Amazon VPC was created in.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n For private hosted zones, this is a complex type that contains information about an Amazon VPC.

" + } + }, + "com.amazonaws.securityhub#AwsRoute53HostedZoneVpcsList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneVpcDetails" + } + }, + "com.amazonaws.securityhub#AwsRoute53QueryLoggingConfigDetails": { + "type": "structure", + "members": { + "CloudWatchLogsLogGroupArn": { + "target": "com.amazonaws.securityhub#CloudWatchLogsLogGroupArnConfigDetails", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log group that Route 53 is publishing logs to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Provides details about a specified Amazon Route 53 configuration for DNS query logging.

" + } + }, "com.amazonaws.securityhub#AwsS3AccountPublicAccessBlockDetails": { "type": "structure", "members": { @@ -21030,6 +21868,32 @@ "smithy.api#documentation": "

Provides details about the current status of the sensitive data detection.

" } }, + "com.amazonaws.securityhub#CloudWatchLogsLogGroupArnConfigDetails": { + "type": "structure", + "members": { + "CloudWatchLogsLogGroupArn": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ARN of the CloudWatch Logs log group that Route 53 is publishing logs to.

" + } + }, + "HostedZoneId": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ID of the hosted zone that CloudWatch Logs is logging queries for.

" + } + }, + "Id": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "

\n The ID for a DNS query logging configuration.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) and other details of the Amazon CloudWatch Logs log group that Amazon Route 53 is \npublishing logs to.

" + } + }, "com.amazonaws.securityhub#CodeVulnerabilitiesFilePath": { "type": "structure", "members": { @@ -28189,6 +29053,48 @@ "traits": { "smithy.api#documentation": "

\n Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, \n applications, or workloads. It also helps you set limits on data processing and track costs.\n

" } + }, + "AwsEventsEventbus": { + "target": "com.amazonaws.securityhub#AwsEventsEventbusDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon EventBridge event bus for an endpoint. An event bus is a router that receives events \nand delivers them to zero or more destinations, or targets.

" + } + }, + "AwsDmsEndpoint": { + "target": "com.amazonaws.securityhub#AwsDmsEndpointDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about a Database Migration Service (DMS) endpoint. An endpoint provides connection, data \nstore type, and location information about your data store.

" + } + }, + "AwsEventsEndpoint": { + "target": "com.amazonaws.securityhub#AwsEventsEndpointDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon EventBridge global endpoint. The endpoint can improve your application’s \navailability by making it Regional-fault tolerant.

" + } + }, + "AwsDmsReplicationTask": { + "target": "com.amazonaws.securityhub#AwsDmsReplicationTaskDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about a DMS replication task. A replication task moves a set of data from the source \nendpoint to the target endpoint.

" + } + }, + "AwsDmsReplicationInstance": { + "target": "com.amazonaws.securityhub#AwsDmsReplicationInstanceDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about a DMS replication instance. DMS uses a replication instance to connect to your \nsource data store, read the source data, and format the data for consumption by the target data store.

" + } + }, + "AwsRoute53HostedZone": { + "target": "com.amazonaws.securityhub#AwsRoute53HostedZoneDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon Route 53 hosted zone, including the four name servers assigned to the hosted \nzone. A hosted zone represents a collection of records that can be managed together, belonging to a single parent domain name.

" + } + }, + "AwsMskCluster": { + "target": "com.amazonaws.securityhub#AwsMskClusterDetails", + "traits": { + "smithy.api#documentation": "

\n Provides details about an Amazon Managed Streaming for Apache Kafka (Amazon MSK) cluster.

" + } } }, "traits": { diff --git a/models/storage-gateway.json b/models/storage-gateway.json index 067147d1b5..dfbfda5e9b 100644 --- a/models/storage-gateway.json +++ b/models/storage-gateway.json @@ -890,7 +890,7 @@ "AverageUploadRateLimitInBitsPerSec": { "target": "com.amazonaws.storagegateway#BandwidthUploadRateLimit", "traits": { - "smithy.api#documentation": "

The average upload rate limit component of the bandwidth rate limit interval, in bits\n per second. This field does not appear in the response if the upload rate limit is not set.\n

" + "smithy.api#documentation": "

The average upload rate limit component of the bandwidth rate limit interval, in bits\n per second. This field does not appear in the response if the upload rate limit is not set.

\n \n

For Tape Gateway and Volume Gateway, the minimum value is 51200.

\n

For S3 File Gateway and FSx File Gateway, the minimum value is\n 104857600.
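The minimums called out above can be captured in a tiny standalone Swift helper; GatewayFamily and the function names are hypothetical and not part of Soto or the Storage Gateway API.

```swift
// Standalone sketch: the documented minimum AverageUploadRateLimitInBitsPerSec per
// gateway family, plus a clamp helper for requested values.
enum GatewayFamily {
    case tape, volume, s3File, fsxFile
}

func minimumUploadRateLimit(for family: GatewayFamily) -> Int {
    switch family {
    case .tape, .volume:
        return 51_200          // bits per second
    case .s3File, .fsxFile:
        return 104_857_600     // bits per second
    }
}

func clampUploadRateLimit(_ requested: Int, for family: GatewayFamily) -> Int {
    max(requested, minimumUploadRateLimit(for: family))
}

print(clampUploadRateLimit(10_000, for: .s3File)) // 104857600
```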

\n
" } }, "AverageDownloadRateLimitInBitsPerSec": { @@ -3592,7 +3592,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns metadata about a gateway such as its name, network interfaces, configured time\n zone, and the state (whether the gateway is running or not). To specify which gateway to\n describe, use the Amazon Resource Name (ARN) of the gateway in your request.

", + "smithy.api#documentation": "

Returns metadata about a gateway such as its name, network interfaces, time zone,\n status, and software version. To specify which gateway to describe, use the Amazon Resource\n Name (ARN) of the gateway in your request.

", "smithy.api#examples": [ { "title": "To describe metadata about the gateway", @@ -3759,6 +3759,12 @@ "traits": { "smithy.api#documentation": "

A unique identifier for the specific instance of the host platform running the gateway.\n This value is only available for certain host environments, and its format depends on the\n host environment type.

" } + }, + "SoftwareVersion": { + "target": "com.amazonaws.storagegateway#SoftwareVersion", + "traits": { + "smithy.api#documentation": "

The version number of the software running on the gateway appliance.

" + } } }, "traits": { @@ -6211,7 +6217,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a file gateway to an Active Directory domain. This operation is only supported for\n file gateways that support the SMB file protocol.

" + "smithy.api#documentation": "

Adds a file gateway to an Active Directory domain. This operation is only supported for\n file gateways that support the SMB file protocol.

\n \n

Joining a domain creates an Active Directory computer account in the default\n organizational unit, using the gateway's Gateway ID as\n the account name (for example, SGW-1234ADE). If your Active Directory environment\n requires that you pre-stage accounts to facilitate the join domain process, you will\n need to create this account ahead of time.

\n

To create the gateway's computer account in an organizational unit other than the\n default, you must specify the organizational unit when joining the domain.

\n
" } }, "com.amazonaws.storagegateway#JoinDomainInput": { @@ -6298,7 +6304,7 @@ "min": 7, "max": 2048 }, - "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$" + "smithy.api#pattern": "^(^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$" } }, "com.amazonaws.storagegateway#LastSoftwareUpdate": { @@ -6372,7 +6378,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a list of the file shares for a specific S3 File Gateway, or the list of file\n shares that belong to the calling user account. This operation is only supported for S3\n File Gateways.

", + "smithy.api#documentation": "

Gets a list of the file shares for a specific S3 File Gateway, or the list of file\n shares that belong to the calling Amazon Web Services account. This operation is only\n supported for S3 File Gateways.

", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextMarker", @@ -7386,7 +7392,8 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a gateway's network interface.

" + "smithy.api#documentation": "

Describes a gateway's network interface.

", + "smithy.api#sensitive": {} } }, "com.amazonaws.storagegateway#NetworkInterfaceId": { @@ -7441,7 +7448,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends you notification through CloudWatch Events when all files written to your file\n share have been uploaded to S3. Amazon S3.

\n

Storage Gateway can send a notification through Amazon CloudWatch Events when all\n files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you\n make a request for notification. When the upload is done, Storage Gateway sends you\n notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to\n send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways.

\n

For more information, see Getting file upload notification in the Storage Gateway User\n Guide.

" + "smithy.api#documentation": "

Sends you a notification through CloudWatch Events when all files written to your file\n share have been uploaded to Amazon S3.

\n

Storage Gateway can send a notification through Amazon CloudWatch Events when all\n files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you\n make a request for notification. When the upload is done, Storage Gateway sends you\n notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to\n send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways.

\n

For more information, see Getting\n file upload notification in the Amazon S3 File Gateway User\n Guide.

" } }, "com.amazonaws.storagegateway#NotifyWhenUploadedInput": { @@ -7700,7 +7707,7 @@ } ], "traits": { - "smithy.api#documentation": "

Refreshes the cached inventory of objects for the specified file share. This operation\n finds objects in the Amazon S3 bucket that were added, removed, or replaced since\n the gateway last listed the bucket's contents and cached the results. This operation\n does not import files into the S3 File Gateway cache storage. It only updates the cached\n inventory to reflect changes in the inventory of the objects in the S3 bucket. This\n operation is only supported in the S3 File Gateway types.

\n

You can subscribe to be notified through an Amazon CloudWatch event when your\n RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway\n User Guide. This operation is Only supported for S3 File Gateways.

\n

When this API is called, it only initiates the refresh operation. When the API call\n completes and returns a success code, it doesn't necessarily mean that the file\n refresh has completed. You should use the refresh-complete notification to determine that\n the operation has completed before you check for new files on the gateway file share. You\n can subscribe to be notified through a CloudWatch event when your RefreshCache\n operation completes.

\n

Throttle limit: This API is asynchronous, so the gateway will accept no more than two\n refreshes at any time. We recommend using the refresh-complete CloudWatch event\n notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway\n User Guide.

\n \n
    \n
  • \n

    Wait at least 60 seconds between consecutive RefreshCache API requests.

    \n
  • \n
  • \n

    RefreshCache does not evict cache entries if invoked consecutively within 60\n seconds of a previous RefreshCache request.

    \n
  • \n
  • \n

    If you invoke the RefreshCache API when two requests are already being\n processed, any new request will cause an\n InvalidGatewayRequestException error because too many requests\n were sent to the server.

    \n
  • \n
\n
\n \n

The S3 bucket name does not need to be included when entering the list of folders in\n the FolderList parameter.

\n
\n

For more information, see Getting notified about file operations in the Storage Gateway\n User Guide.

" + "smithy.api#documentation": "

Refreshes the cached inventory of objects for the specified file share. This operation\n finds objects in the Amazon S3 bucket that were added, removed, or replaced since\n the gateway last listed the bucket's contents and cached the results. This operation\n does not import files into the S3 File Gateway cache storage. It only updates the cached\n inventory to reflect changes in the inventory of the objects in the S3 bucket. This\n operation is only supported in the S3 File Gateway types.

\n

You can subscribe to be notified through an Amazon CloudWatch event when your\n RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway\n User Guide. This operation is only supported for S3 File Gateways.

\n

When this API is called, it only initiates the refresh operation. When the API call\n completes and returns a success code, it doesn't necessarily mean that the file\n refresh has completed. You should use the refresh-complete notification to determine that\n the operation has completed before you check for new files on the gateway file share. You\n can subscribe to be notified through a CloudWatch event when your RefreshCache\n operation completes.

\n

Throttle limit: This API is asynchronous, so the gateway will accept no more than two\n refreshes at any time. We recommend using the refresh-complete CloudWatch event\n notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway\n User Guide.

\n \n
    \n
  • \n

    Wait at least 60 seconds between consecutive RefreshCache API requests.

    \n
  • \n
  • \n

    If you invoke the RefreshCache API when two requests are already being\n processed, any new request will cause an\n InvalidGatewayRequestException error because too many requests\n were sent to the server.

    \n
  • \n
\n
\n \n

The S3 bucket name does not need to be included when entering the list of folders in\n the FolderList parameter.

\n
\n

For more information, see Getting notified about file operations in the Storage Gateway\n User Guide.
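The 60-second spacing recommended above can be enforced with a small standalone helper like the sketch below; the class is hypothetical and not part of Soto or the Storage Gateway API.

```swift
import Foundation

// Standalone sketch: paces RefreshCache calls per the guidance above -- wait at least
// 60 seconds between consecutive requests and rely on the refresh-complete
// notification before issuing more.
final class RefreshCachePacer {
    private var lastRequest: Date?
    private let minimumInterval: TimeInterval = 60

    /// How long to wait before the next RefreshCache request may be sent.
    func delayBeforeNextRequest(now: Date = Date()) -> TimeInterval {
        guard let last = lastRequest else { return 0 }
        return max(0, minimumInterval - now.timeIntervalSince(last))
    }

    /// Record that a RefreshCache request was just issued.
    func recordRequest(at date: Date = Date()) {
        lastRequest = date
    }
}

let pacer = RefreshCachePacer()
pacer.recordRequest()
print(pacer.delayBeforeNextRequest()) // ~60 seconds right after a request
```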

" } }, "com.amazonaws.storagegateway#RefreshCacheInput": { @@ -8076,7 +8083,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):iam::([0-9]+):role/(\\S+)$" + "smithy.api#pattern": "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):iam::([0-9]+):role/(\\S+)$" } }, "com.amazonaws.storagegateway#SMBFileShareInfo": { @@ -8514,6 +8521,9 @@ } } }, + "com.amazonaws.storagegateway#SoftwareVersion": { + "type": "string" + }, "com.amazonaws.storagegateway#Squash": { "type": "string", "traits": { @@ -10198,7 +10208,7 @@ "min": 50, "max": 500 }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$" + "smithy.api#pattern": "^arn:(aws(|-cn|-us-gov|-iso[A-Za-z0-9_-]*)):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{5,16}$" } }, "com.amazonaws.storagegateway#TapeARNs": { @@ -10643,7 +10653,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways\n do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in\n effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This\n operation is supported only for volume, tape and S3 file gateways. FSx file gateways do not\n support bandwidth rate limits.

" + "smithy.api#documentation": "

Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways\n do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in\n effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This\n operation is supported for volume, tape, and S3 file gateways. S3 file gateways support\n bandwidth rate limits for upload only. FSx file gateways do not support bandwidth rate\n limits.

" } }, "com.amazonaws.storagegateway#UpdateBandwidthRateLimitScheduleInput": { diff --git a/models/textract.json b/models/textract.json index 99439ff72d..12e209c0a8 100644 --- a/models/textract.json +++ b/models/textract.json @@ -44,6 +44,305 @@ "smithy.api#error": "client" } }, + "com.amazonaws.textract#Adapter": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A unique identifier for the adapter resource.

", + "smithy.api#required": {} + } + }, + "Pages": { + "target": "com.amazonaws.textract#AdapterPages", + "traits": { + "smithy.api#documentation": "

Pages is a parameter that the user inputs to specify which pages to apply an adapter to. The following is a \n list of rules for using this parameter.

\n
    \n
  • \n

    If a page is not specified, it is set to [\"1\"] by default.

    \n
  • \n
  • \n

    The following characters are allowed in the parameter's string: \n 0 1 2 3 4 5 6 7 8 9 - *. No whitespace is allowed.

    \n
  • \n
  • \n

    When using * to indicate all pages, it must be the only element in the list.

    \n
  • \n
  • \n

You can use page intervals, such as [\"1-3\", \"1-1\", \"4-*\"], where * indicates the last page of the \n document.

    \n
  • \n
  • \n

    Specified pages must be greater than 0 and less than or equal to the number of pages in the document.

    \n
  • \n
" + } + }, + "Version": { + "target": "com.amazonaws.textract#AdapterVersion", + "traits": { + "smithy.api#documentation": "

A string that identifies the version of the adapter.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An adapter selected for use when analyzing documents. Contains an adapter ID and a version number. \n Contains information on pages selected for analysis when analyzing documents asynchronously.
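The Pages rules listed above lend themselves to a small standalone validator. The sketch below is a rough, hypothetical check (it does not verify the upper bound against the actual page count, which requires the document itself), and it is not part of Textract or Soto.

```swift
// Standalone sketch: a rough check of the documented Pages rules for the Adapter
// structure. Page-count bounds are not checked here because the document length
// is not known at this point.
func validateAdapterPages(_ pages: [String]?) -> Bool {
    let specs = pages ?? ["1"]                           // unspecified defaults to ["1"]
    if specs.contains("*") { return specs.count == 1 }   // "*" must be the only element
    let allowed = Set("0123456789-*")                    // no whitespace permitted
    for spec in specs {
        guard !spec.isEmpty, spec.allSatisfy({ allowed.contains($0) }) else { return false }
        // Intervals such as "1-3" or "4-*" have exactly one "-"; "*" may appear only
        // as the upper bound (the last page of the document).
        let parts = spec.split(separator: "-", omittingEmptySubsequences: false)
        switch parts.count {
        case 1:
            guard let page = Int(parts[0]), page > 0 else { return false }
        case 2:
            guard let start = Int(parts[0]), start > 0 else { return false }
            if parts[1] != "*" {
                guard let end = Int(parts[1]), end >= start else { return false }
            }
        default:
            return false
        }
    }
    return true
}

print(validateAdapterPages(["1-3", "4-*"])) // true
print(validateAdapterPages(["*", "2"]))     // false ("*" must stand alone)
```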

" + } + }, + "com.amazonaws.textract#AdapterDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\s!\"\\#\\$%'&\\(\\)\\*\\+\\,\\-\\./:;=\\?@\\[\\\\\\]\\^_`\\{\\|\\}~><]+$" + } + }, + "com.amazonaws.textract#AdapterId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 1011 + } + } + }, + "com.amazonaws.textract#AdapterList": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#AdapterOverview" + } + }, + "com.amazonaws.textract#AdapterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_]+$" + } + }, + "com.amazonaws.textract#AdapterOverview": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A unique identifier for the adapter resource.

" + } + }, + "AdapterName": { + "target": "com.amazonaws.textract#AdapterName", + "traits": { + "smithy.api#documentation": "

A string naming the adapter resource.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

The date and time that the adapter was created.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

The feature types that the adapter is operating on.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information on the adapter, including the adapter ID, name, creation time, and feature types.

" + } + }, + "com.amazonaws.textract#AdapterPage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 9 + }, + "smithy.api#pattern": "^[0-9\\*\\-]+$" + } + }, + "com.amazonaws.textract#AdapterPages": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#AdapterPage" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.textract#AdapterVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.textract#AdapterVersionDatasetConfig": { + "type": "structure", + "members": { + "ManifestS3Object": { + "target": "com.amazonaws.textract#S3Object" + } + }, + "traits": { + "smithy.api#documentation": "

The dataset configuration options for a given version of an adapter. \n Can include an Amazon S3 bucket if specified.

" + } + }, + "com.amazonaws.textract#AdapterVersionEvaluationMetric": { + "type": "structure", + "members": { + "Baseline": { + "target": "com.amazonaws.textract#EvaluationMetric", + "traits": { + "smithy.api#documentation": "

The F1 score, precision, and recall metrics for the baseline model.

" + } + }, + "AdapterVersion": { + "target": "com.amazonaws.textract#EvaluationMetric", + "traits": { + "smithy.api#documentation": "

The F1 score, precision, and recall metrics for the adapter version.

" + } + }, + "FeatureType": { + "target": "com.amazonaws.textract#FeatureType", + "traits": { + "smithy.api#documentation": "

Indicates the feature type being analyzed by a given adapter version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information on the metrics used to evaluate the performance of a given adapter version. Includes data for \n baseline model performance and individual adapter version performance.

" + } + }, + "com.amazonaws.textract#AdapterVersionEvaluationMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#AdapterVersionEvaluationMetric" + } + }, + "com.amazonaws.textract#AdapterVersionList": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#AdapterVersionOverview" + } + }, + "com.amazonaws.textract#AdapterVersionOverview": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A unique identifier for the adapter associated with a given adapter version.

" + } + }, + "AdapterVersion": { + "target": "com.amazonaws.textract#AdapterVersion", + "traits": { + "smithy.api#documentation": "

An identifier for a given adapter version.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

The date and time that a given adapter version was created.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

The feature types that the adapter version is operating on.

" + } + }, + "Status": { + "target": "com.amazonaws.textract#AdapterVersionStatus", + "traits": { + "smithy.api#documentation": "

Contains information on the status of a given adapter version.

" + } + }, + "StatusMessage": { + "target": "com.amazonaws.textract#AdapterVersionStatusMessage", + "traits": { + "smithy.api#documentation": "

A message explaining the status of a given adapter version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary info for an adapter version. Contains information on the AdapterId, AdapterVersion, CreationTime, FeatureTypes, and Status.

" + } + }, + "com.amazonaws.textract#AdapterVersionStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "AT_RISK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AT_RISK" + } + }, + "DEPRECATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEPRECATED" + } + }, + "CREATION_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATION_ERROR" + } + }, + "CREATION_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATION_IN_PROGRESS" + } + } + } + }, + "com.amazonaws.textract#AdapterVersionStatusMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\s!\"\\#\\$%'&\\(\\)\\*\\+\\,\\-\\./:;=\\?@\\[\\\\\\]\\^_`\\{\\|\\}~><]+$" + } + }, + "com.amazonaws.textract#Adapters": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#Adapter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.textract#AdaptersConfig": { + "type": "structure", + "members": { + "Adapters": { + "target": "com.amazonaws.textract#Adapters", + "traits": { + "smithy.api#documentation": "

A list of adapters to be used when analyzing the specified document.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about adapters used when analyzing a document, \n with each adapter specified using an AdapterId and version.

" + } + }, + "com.amazonaws.textract#AmazonResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + } + } + }, "com.amazonaws.textract#AnalyzeDocument": { "type": "operation", "input": { @@ -101,7 +400,7 @@ "FeatureTypes": { "target": "com.amazonaws.textract#FeatureTypes", "traits": { - "smithy.api#documentation": "

A list of the types of analysis to perform. Add TABLES to the list to return information\n about the tables that are detected in the input document. Add FORMS to return detected form\n data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list\n to return information about the layout of the document. To perform both forms\n and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures\n within the document and within form data and table data, add SIGNATURES to either TABLES or\n FORMS. All lines and words detected in the document are included in the response (including\n text that isn't related to the value of FeatureTypes).

", + "smithy.api#documentation": "

A list of the types of analysis to perform. Add TABLES to the list to return information\n about the tables that are detected in the input document. Add FORMS to return detected form\n data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list\n to return information about the layout of the document. All lines and words detected in the document are included in the response (including\n text that isn't related to the value of FeatureTypes).

", "smithy.api#required": {} } }, @@ -116,6 +415,12 @@ "traits": { "smithy.api#documentation": "

Contains Queries and the alias for those Queries, as determined by the input.

" } + }, + "AdaptersConfig": { + "target": "com.amazonaws.textract#AdaptersConfig", + "traits": { + "smithy.api#documentation": "

Specifies the adapter to be used when analyzing a document.
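For Soto users, the new AdaptersConfig plumbs into AnalyzeDocument alongside FeatureTypes and QueriesConfig. The sketch below shows roughly how such a request might be built once the Swift shapes are regenerated from this model; the generated type names and initializer signatures are assumed from Soto's usual conventions, and the adapter ID, version, bucket, object key, and query text are placeholders.

```swift
import SotoTextract

// Hedged sketch: assumes Soto regenerates Textract.AdaptersConfig, Textract.Adapter,
// and Textract.AnalyzeDocumentRequest from this model and exposes an async
// analyzeDocument method; verify the exact initializers against the generated
// SotoTextract sources.
func analyzeWithAdapter(using textract: Textract) async throws {
    let adapters = Textract.AdaptersConfig(adapters: [
        .init(adapterId: "111111111111", pages: ["1-3", "4-*"], version: "1")
    ])
    let request = Textract.AnalyzeDocumentRequest(
        adaptersConfig: adapters,
        document: .init(s3Object: .init(bucket: "my-input-bucket", name: "statement.pdf")),
        featureTypes: [.queries],
        queriesConfig: .init(queries: [.init(text: "What is the account number?")])
    )
    let response = try await textract.analyzeDocument(request)
    print("Returned \(response.blocks?.count ?? 0) blocks")
}
```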

" + } } }, "traits": { @@ -332,6 +637,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.textract#AutoUpdate": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.textract#BadDocumentException": { "type": "structure", "members": { @@ -353,7 +675,7 @@ "BlockType": { "target": "com.amazonaws.textract#BlockType", "traits": { - "smithy.api#documentation": "

The type of text item that's recognized. In operations for text detection, the following types are returned:

  • PAGE - Contains a list of the LINE Block objects that are detected on a document page.
  • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.
  • LINE - A string of tab-delimited, contiguous words that are detected on a document page.

In text analysis operations, the following types are returned:

  • PAGE - Contains a list of child Block objects that are detected on a document page.
  • KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object.
  • WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.
  • LINE - A string of tab-delimited, contiguous words that are detected on a document page.
  • TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each.
  • TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table.
  • TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table.
  • CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell.
  • MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells.
  • SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element.
  • SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell.
  • QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer.
  • QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score.

" + "smithy.api#documentation": "

The type of text item that's recognized. In operations for text detection, the following types are returned:

  • PAGE - Contains a list of the LINE Block objects that are detected on a document page.
  • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.
  • LINE - A string of tab-delimited, contiguous words that are detected on a document page.

In text analysis operations, the following types are returned:

  • PAGE - Contains a list of child Block objects that are detected on a document page.
  • KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object.
  • WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.
  • LINE - A string of tab-delimited, contiguous words that are detected on a document page.
  • TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each.
  • TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table.
  • TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table.
  • CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell.
  • MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contains data from individual cells.
  • SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element.
  • SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell.
  • QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer.
  • QUERY_RESULT - A response to a question asked during the call of AnalyzeDocument. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score.

The following BlockTypes are only returned for Amazon Textract Layout:

  • LAYOUT_TITLE - The main title of the document.
  • LAYOUT_HEADER - Text located in the top margin of the document.
  • LAYOUT_FOOTER - Text located in the bottom margin of the document.
  • LAYOUT_SECTION_HEADER - The titles of sections within a document.
  • LAYOUT_PAGE_NUMBER - The page number of the documents.
  • LAYOUT_LIST - Any information grouped together in list form.
  • LAYOUT_FIGURE - Indicates the location of an image in a document.
  • LAYOUT_TABLE - Indicates the location of a table in the document.
  • LAYOUT_KEY_VALUE - Indicates the location of form key-values in a document.
  • LAYOUT_TEXT - Text that is present typically as a part of paragraphs in documents.

" } }, "Confidence": { @@ -646,6 +968,21 @@ "smithy.api#pattern": "^[a-zA-Z0-9-_]+$" } }, + "com.amazonaws.textract#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.textract#String" + }, + "Code": { + "target": "com.amazonaws.textract#String" + } + }, + "traits": { + "smithy.api#documentation": "

Updating or deleting a resource can cause an inconsistent state.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.textract#ContentClassifier": { "type": "enum", "members": { @@ -675,23 +1012,23 @@ } } }, - "com.amazonaws.textract#DetectDocumentText": { + "com.amazonaws.textract#CreateAdapter": { "type": "operation", "input": { - "target": "com.amazonaws.textract#DetectDocumentTextRequest" + "target": "com.amazonaws.textract#CreateAdapterRequest" }, "output": { - "target": "com.amazonaws.textract#DetectDocumentTextResponse" + "target": "com.amazonaws.textract#CreateAdapterResponse" }, "errors": [ { "target": "com.amazonaws.textract#AccessDeniedException" }, { - "target": "com.amazonaws.textract#BadDocumentException" + "target": "com.amazonaws.textract#ConflictException" }, { - "target": "com.amazonaws.textract#DocumentTooLargeException" + "target": "com.amazonaws.textract#IdempotentParameterMismatchException" }, { "target": "com.amazonaws.textract#InternalServerError" @@ -700,35 +1037,394 @@ "target": "com.amazonaws.textract#InvalidParameterException" }, { - "target": "com.amazonaws.textract#InvalidS3ObjectException" + "target": "com.amazonaws.textract#LimitExceededException" }, { "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" }, + { + "target": "com.amazonaws.textract#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.textract#ThrottlingException" }, { - "target": "com.amazonaws.textract#UnsupportedDocumentException" + "target": "com.amazonaws.textract#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Detects text in the input document. Amazon Textract can detect lines of text and the\n words that make up a line of text. The input document must be in one of the following image\n formats: JPEG, PNG, PDF, or TIFF. DetectDocumentText returns the detected\n text in an array of Block objects.

\n

Each document page has as an associated Block of type PAGE. Each PAGE Block object\n is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is\n a parent for each word that makes up the line. Words are represented by Block objects of type WORD.

\n

\n DetectDocumentText is a synchronous operation. To analyze documents \n asynchronously, use StartDocumentTextDetection.

\n

For more information, see Document Text Detection.

" + "smithy.api#documentation": "

Creates an adapter, which can be fine-tuned for enhanced performance on user-provided documents. Takes an AdapterName and FeatureType. Currently the only supported feature type is QUERIES. You can also provide a Description, Tags, and a ClientRequestToken. You can choose whether or not the adapter should be automatically updated with the AutoUpdate argument. By default, AutoUpdate is set to DISABLED.
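Since Soto regenerates its Swift clients from these model files, the new adapter operations will surface as methods on the generated Textract service object. A minimal sketch of creating an adapter follows, assuming Soto's usual Smithy-to-Swift naming (Textract.CreateAdapterRequest, the .queries feature type, and the .enabled auto-update case are assumptions based on those conventions, not values taken from this diff).

    import SotoTextract

    // Sketch only: shape and member names are assumed from Soto's standard code
    // generation for the CreateAdapter operation defined above.
    func createQueriesAdapter() async throws {
        let client = AWSClient(httpClientProvider: .createNew)
        defer { try? client.syncShutdown() }
        let textract = Textract(client: client, region: .useast1)

        let request = Textract.CreateAdapterRequest(
            adapterName: "invoice-queries-adapter",
            autoUpdate: .enabled,                     // AutoUpdate defaults to DISABLED when omitted
            description: "Adapter tuned for invoice query extraction",
            featureTypes: [.queries]                  // currently the only supported feature type
        )
        let response = try await textract.createAdapter(request)
        print("Created adapter:", response.adapterId ?? "n/a")
    }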

", + "smithy.api#idempotent": {} } }, - "com.amazonaws.textract#DetectDocumentTextRequest": { + "com.amazonaws.textract#CreateAdapterRequest": { "type": "structure", "members": { - "Document": { - "target": "com.amazonaws.textract#Document", + "AdapterName": { + "target": "com.amazonaws.textract#AdapterName", "traits": { - "smithy.api#documentation": "

The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI\n to call Amazon Textract operations, you can't pass image bytes. The document must be an image \n in JPEG or PNG format.

\n

If you're using an AWS SDK to call Amazon Textract, you might not need to base64-encode\n image bytes that are passed using the Bytes field.

", + "smithy.api#documentation": "

The name to be assigned to the adapter being created.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#input": {} + }, + "ClientRequestToken": { + "target": "com.amazonaws.textract#ClientRequestToken", + "traits": { + "smithy.api#documentation": "

An idempotency token used to recognize the request. If the same token is used with multiple CreateAdapter requests, the same session is returned. This token is employed to avoid unintentionally creating the same session multiple times.

", + "smithy.api#idempotencyToken": {} + } + }, + "Description": { + "target": "com.amazonaws.textract#AdapterDescription", + "traits": { + "smithy.api#documentation": "

The description to be assigned to the adapter being created.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

The type of feature that the adapter is being trained on. Currently, the only supported feature type is QUERIES.

", + "smithy.api#required": {} + } + }, + "AutoUpdate": { + "target": "com.amazonaws.textract#AutoUpdate", + "traits": { + "smithy.api#documentation": "

Controls whether or not the adapter should automatically update.

" + } + }, + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A list of tags to be added to the adapter.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#CreateAdapterResponse": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing the unique ID for the adapter that has been created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#CreateAdapterVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#CreateAdapterVersionRequest" + }, + "output": { + "target": "com.amazonaws.textract#CreateAdapterVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#ConflictException" + }, + { + "target": "com.amazonaws.textract#IdempotentParameterMismatchException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidKMSKeyException" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#InvalidS3ObjectException" + }, + { + "target": "com.amazonaws.textract#LimitExceededException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new version of an adapter. Operates on a provided AdapterId and a specified \n dataset provided via the DatasetConfig argument. Requires that you \n specify an Amazon S3 bucket with the OutputConfig argument. You can provide an optional KMSKeyId, \n an optional ClientRequestToken, and optional tags.
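Training a version then points the service at an S3 manifest and an output bucket. The sketch below again assumes the Soto-generated names (AdapterVersionDatasetConfig with a manifestS3Object member, plus Textract's existing S3Object and OutputConfig shapes); treat it as an illustration of the call shape rather than the definitive API, and note the bucket names and manifest key are placeholders.

    import SotoTextract

    // Sketch only: bucket names and the manifest key below are placeholders.
    func createAdapterVersion(textract: Textract, adapterId: String) async throws -> String? {
        let request = Textract.CreateAdapterVersionRequest(
            adapterId: adapterId,
            datasetConfig: .init(
                manifestS3Object: .init(bucket: "my-training-bucket",
                                        name: "manifests/invoice-queries.manifest")
            ),
            outputConfig: .init(s3Bucket: "my-output-bucket", s3Prefix: "adapter-versions/")
        )
        let response = try await textract.createAdapterVersion(request)
        return response.adapterVersion
    }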

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.textract#CreateAdapterVersionRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter that will receive a new version.

", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.textract#ClientRequestToken", + "traits": { + "smithy.api#documentation": "

An idempotency token used to recognize the request. If the same token is used with multiple CreateAdapterVersion requests, the same session is returned. This token is employed to avoid unintentionally creating the same session multiple times.

", + "smithy.api#idempotencyToken": {} + } + }, + "DatasetConfig": { + "target": "com.amazonaws.textract#AdapterVersionDatasetConfig", + "traits": { + "smithy.api#documentation": "

Specifies a dataset used to train a new adapter version. Takes a ManifestS3Object as the\n value.

", + "smithy.api#required": {} + } + }, + "KMSKeyId": { + "target": "com.amazonaws.textract#KMSKeyId", + "traits": { + "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt your documents.

" + } + }, + "OutputConfig": { + "target": "com.amazonaws.textract#OutputConfig", + "traits": { + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A set of tags (key-value pairs) that you want to attach to the adapter version.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#CreateAdapterVersionResponse": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing the unique ID for the adapter that has received a new version.

" + } + }, + "AdapterVersion": { + "target": "com.amazonaws.textract#AdapterVersion", + "traits": { + "smithy.api#documentation": "

A string describing the new version of the adapter.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#DateTime": { + "type": "timestamp" + }, + "com.amazonaws.textract#DeleteAdapter": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#DeleteAdapterRequest" + }, + "output": { + "target": "com.amazonaws.textract#DeleteAdapterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#ConflictException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Textract adapter. Takes an AdapterId and deletes the adapter specified by the ID.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.textract#DeleteAdapterRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter to be deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#DeleteAdapterResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#DeleteAdapterVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#DeleteAdapterVersionRequest" + }, + "output": { + "target": "com.amazonaws.textract#DeleteAdapterVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#ConflictException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Textract adapter version. Requires that you specify both an AdapterId and an AdapterVersion. Deletes the adapter version specified by the AdapterId and the AdapterVersion.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.textract#DeleteAdapterVersionRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter version that will be deleted.

", + "smithy.api#required": {} + } + }, + "AdapterVersion": { + "target": "com.amazonaws.textract#AdapterVersion", + "traits": { + "smithy.api#documentation": "

Specifies the adapter version to be deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#DeleteAdapterVersionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#DetectDocumentText": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#DetectDocumentTextRequest" + }, + "output": { + "target": "com.amazonaws.textract#DetectDocumentTextResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#BadDocumentException" + }, + { + "target": "com.amazonaws.textract#DocumentTooLargeException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#InvalidS3ObjectException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#UnsupportedDocumentException" + } + ], + "traits": { + "smithy.api#documentation": "

Detects text in the input document. Amazon Textract can detect lines of text and the\n words that make up a line of text. The input document must be in one of the following image\n formats: JPEG, PNG, PDF, or TIFF. DetectDocumentText returns the detected\n text in an array of Block objects.

\n

Each document page has an associated Block of type PAGE. Each PAGE Block object is the parent of LINE Block objects that represent the lines of detected text on a page. A LINE Block object is a parent for each word that makes up the line. Words are represented by Block objects of type WORD.

\n

\n DetectDocumentText is a synchronous operation. To analyze documents \n asynchronously, use StartDocumentTextDetection.

\n

For more information, see Document Text Detection.

" + } + }, + "com.amazonaws.textract#DetectDocumentTextRequest": { + "type": "structure", + "members": { + "Document": { + "target": "com.amazonaws.textract#Document", + "traits": { + "smithy.api#documentation": "

The input document as base64-encoded bytes or an Amazon S3 object. If you use the AWS CLI\n to call Amazon Textract operations, you can't pass image bytes. The document must be an image \n in JPEG or PNG format.

\n

If you're using an AWS SDK to call Amazon Textract, you might not need to base64-encode\n image bytes that are passed using the Bytes field.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.textract#DetectDocumentTextResponse": { @@ -958,6 +1654,35 @@ "com.amazonaws.textract#ErrorCode": { "type": "string" }, + "com.amazonaws.textract#EvaluationMetric": { + "type": "structure", + "members": { + "F1Score": { + "target": "com.amazonaws.textract#Float", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The F1 score for an adapter version.

" + } + }, + "Precision": { + "target": "com.amazonaws.textract#Float", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The Precision score for an adapter version.

" + } + }, + "Recall": { + "target": "com.amazonaws.textract#Float", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The Recall score for an adapter version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The evaluation metrics (F1 score, Precision, and Recall) for an adapter version.

" + } + }, "com.amazonaws.textract#ExpenseCurrency": { "type": "structure", "members": { @@ -1217,24 +1942,252 @@ } } }, - "com.amazonaws.textract#Geometry": { + "com.amazonaws.textract#Geometry": { + "type": "structure", + "members": { + "BoundingBox": { + "target": "com.amazonaws.textract#BoundingBox", + "traits": { + "smithy.api#documentation": "

An axis-aligned coarse representation of the location of the recognized item on the\n document page.

" + } + }, + "Polygon": { + "target": "com.amazonaws.textract#Polygon", + "traits": { + "smithy.api#documentation": "

Within the bounding box, a fine-grained polygon around the recognized item.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about where the following items are located on a document page: detected\n page, text, key-value pairs, tables, table cells, and selection elements.

" + } + }, + "com.amazonaws.textract#GetAdapter": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#GetAdapterRequest" + }, + "output": { + "target": "com.amazonaws.textract#GetAdapterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets configuration information for an adapter specified by an AdapterId, returning information on AdapterName, Description,\n CreationTime, AutoUpdate status, and FeatureTypes.

" + } + }, + "com.amazonaws.textract#GetAdapterRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#GetAdapterResponse": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string identifying the adapter that information has been retrieved for.

" + } + }, + "AdapterName": { + "target": "com.amazonaws.textract#AdapterName", + "traits": { + "smithy.api#documentation": "

The name of the requested adapter.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

The date and time that the requested adapter was created.

" + } + }, + "Description": { + "target": "com.amazonaws.textract#AdapterDescription", + "traits": { + "smithy.api#documentation": "

The description for the requested adapter.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

List of the targeted feature types for the requested adapter.

" + } + }, + "AutoUpdate": { + "target": "com.amazonaws.textract#AutoUpdate", + "traits": { + "smithy.api#documentation": "

Binary value indicating if the adapter is being automatically updated or not.

" + } + }, + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A set of tags (key-value pairs) associated with the adapter that has been retrieved.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#GetAdapterVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#GetAdapterVersionRequest" + }, + "output": { + "target": "com.amazonaws.textract#GetAdapterVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets configuration information for the specified adapter version, including: \n AdapterId, AdapterVersion, FeatureTypes, Status, StatusMessage, DatasetConfig, \n KMSKeyId, OutputConfig, Tags and EvaluationMetrics.

" + } + }, + "com.amazonaws.textract#GetAdapterVersionRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string specifying a unique ID for the adapter version you want to retrieve information for.

", + "smithy.api#required": {} + } + }, + "AdapterVersion": { + "target": "com.amazonaws.textract#AdapterVersion", + "traits": { + "smithy.api#documentation": "

A string specifying the adapter version you want to retrieve information for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#GetAdapterVersionResponse": { "type": "structure", "members": { - "BoundingBox": { - "target": "com.amazonaws.textract#BoundingBox", + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", "traits": { - "smithy.api#documentation": "

An axis-aligned coarse representation of the location of the recognized item on the\n document page.

" + "smithy.api#documentation": "

A string containing a unique ID for the adapter version being retrieved.

" } }, - "Polygon": { - "target": "com.amazonaws.textract#Polygon", + "AdapterVersion": { + "target": "com.amazonaws.textract#AdapterVersion", "traits": { - "smithy.api#documentation": "

Within the bounding box, a fine-grained polygon around the recognized item.

" + "smithy.api#documentation": "

A string containing the adapter version that has been retrieved.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

The time that the adapter version was created.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

List of the targeted feature types for the requested adapter version.

" + } + }, + "Status": { + "target": "com.amazonaws.textract#AdapterVersionStatus", + "traits": { + "smithy.api#documentation": "

The status of the adapter version that has been requested.

" + } + }, + "StatusMessage": { + "target": "com.amazonaws.textract#AdapterVersionStatusMessage", + "traits": { + "smithy.api#documentation": "

A message that describes the status of the requested adapter version.

" + } + }, + "DatasetConfig": { + "target": "com.amazonaws.textract#AdapterVersionDatasetConfig", + "traits": { + "smithy.api#documentation": "

Specifies a dataset used to train a new adapter version. Takes a ManifestS3Object as the value.

" + } + }, + "KMSKeyId": { + "target": "com.amazonaws.textract#KMSKeyId", + "traits": { + "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt your documents.

" + } + }, + "OutputConfig": { + "target": "com.amazonaws.textract#OutputConfig" + }, + "EvaluationMetrics": { + "target": "com.amazonaws.textract#AdapterVersionEvaluationMetrics", + "traits": { + "smithy.api#documentation": "

The evaluation metrics (F1 score, Precision, and Recall) for the requested version, \n grouped by baseline metrics and adapter version.

" + } + }, + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A set of tags (key-value pairs) that are associated with the adapter version.

" } } }, "traits": { - "smithy.api#documentation": "

Information about where the following items are located on a document page: detected\n page, text, key-value pairs, tables, table cells, and selection elements.

" + "smithy.api#output": {} } }, "com.amazonaws.textract#GetDocumentAnalysis": { @@ -2336,6 +3289,259 @@ "target": "com.amazonaws.textract#LineItemFields" } }, + "com.amazonaws.textract#ListAdapterVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#ListAdapterVersionsRequest" + }, + "output": { + "target": "com.amazonaws.textract#ListAdapterVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all versions of an adapter that meet the specified filtration criteria.
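Because the operation is marked smithy.api#paginated, results can be walked with the usual NextToken loop. Soto also generates paginator helpers, but the plain loop below relies only on the request and response members shown in this hunk; the shape names follow Soto's standard code generation and are assumptions rather than values from this diff.

    import SotoTextract

    // Sketch: page through every version of one adapter using the NextToken contract
    // described above.
    func printAllAdapterVersions(textract: Textract, adapterId: String) async throws {
        var nextToken: String? = nil
        repeat {
            let page = try await textract.listAdapterVersions(
                .init(adapterId: adapterId, nextToken: nextToken)
            )
            for version in page.adapterVersions ?? [] {
                print(version)
            }
            nextToken = page.nextToken
        } while nextToken != nil
    }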

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AdapterVersions", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.textract#ListAdapterVersionsRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter to match for when listing adapter versions.

" + } + }, + "AfterCreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

Specifies the lower bound for the ListAdapterVersions operation. \n Ensures ListAdapterVersions returns only adapter versions created after the specified creation time.

" + } + }, + "BeforeCreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

Specifies the upper bound for the ListAdapterVersions operation. Ensures ListAdapterVersions returns only adapter versions created before the specified creation time.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.textract#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return when listing adapter versions.

" + } + }, + "NextToken": { + "target": "com.amazonaws.textract#PaginationToken", + "traits": { + "smithy.api#documentation": "

Identifies the next page of results to return when listing adapter versions.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#ListAdapterVersionsResponse": { + "type": "structure", + "members": { + "AdapterVersions": { + "target": "com.amazonaws.textract#AdapterVersionList", + "traits": { + "smithy.api#documentation": "

Adapter versions that match the filtering criteria specified when calling ListAdapterVersions.

" + } + }, + "NextToken": { + "target": "com.amazonaws.textract#PaginationToken", + "traits": { + "smithy.api#documentation": "

Identifies the next page of results to return when listing adapter versions.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#ListAdapters": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#ListAdaptersRequest" + }, + "output": { + "target": "com.amazonaws.textract#ListAdaptersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all adapters that match the specified filtration criteria.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Adapters", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.textract#ListAdaptersRequest": { + "type": "structure", + "members": { + "AfterCreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

Specifies the lower bound for the ListAdapters operation. \n Ensures ListAdapters returns only adapters created after the specified creation time.

" + } + }, + "BeforeCreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

Specifies the upper bound for the ListAdapters operation. \n Ensures ListAdapters returns only adapters created before the specified creation time.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.textract#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return when listing adapters.

" + } + }, + "NextToken": { + "target": "com.amazonaws.textract#PaginationToken", + "traits": { + "smithy.api#documentation": "

Identifies the next page of results to return when listing adapters.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#ListAdaptersResponse": { + "type": "structure", + "members": { + "Adapters": { + "target": "com.amazonaws.textract#AdapterList", + "traits": { + "smithy.api#documentation": "

A list of adapters that match the filtering criteria specified when calling ListAdapters.

" + } + }, + "NextToken": { + "target": "com.amazonaws.textract#PaginationToken", + "traits": { + "smithy.api#documentation": "

Identifies the next page of results to return when listing adapters.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.textract#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all tags for an Amazon Textract resource.

" + } + }, + "com.amazonaws.textract#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.textract#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that specifies the resource to list tags for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A set of tags (key-value pairs) that are part of the requested resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.textract#MaxResults": { "type": "integer", "traits": { @@ -2452,7 +3658,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 255 + "max": 1024 }, "smithy.api#pattern": "\\S" } @@ -2704,6 +3910,21 @@ } } }, + "com.amazonaws.textract#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.textract#String" + }, + "Code": { + "target": "com.amazonaws.textract#String" + } + }, + "traits": { + "smithy.api#documentation": "

Returned when an operation tried to access a nonexistent resource.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.textract#RoleArn": { "type": "string", "traits": { @@ -2797,6 +4018,21 @@ } } }, + "com.amazonaws.textract#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.textract#String" + }, + "Code": { + "target": "com.amazonaws.textract#String" + } + }, + "traits": { + "smithy.api#documentation": "

Returned when a request cannot be completed as it would exceed a maximum service quota.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.textract#SignatureDetection": { "type": "structure", "members": { @@ -2945,6 +4181,12 @@ }, "QueriesConfig": { "target": "com.amazonaws.textract#QueriesConfig" + }, + "AdaptersConfig": { + "target": "com.amazonaws.textract#AdaptersConfig", + "traits": { + "smithy.api#documentation": "

Specifies the adapter to be used when analyzing a document.

" + } } }, "traits": { @@ -3307,6 +4549,120 @@ "smithy.api#mediaType": "application/json" } }, + "com.amazonaws.textract#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$" + } + }, + "com.amazonaws.textract#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.textract#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.textract#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.textract#TagKey" + }, + "value": { + "target": "com.amazonaws.textract#TagValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.textract#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.textract#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds one or more tags to the specified resource.
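Adapters are taggable Textract resources, so the new generic tagging operations follow the familiar ARN-based pattern. A small sketch follows, with a placeholder ARN and member names assumed from the request shape below.

    import SotoTextract

    // Sketch: tag an adapter and read the tags back. The ARN is illustrative only.
    func tagAdapter(textract: Textract, adapterArn: String) async throws {
        _ = try await textract.tagResource(
            .init(resourceARN: adapterArn,
                  tags: ["team": "document-processing", "environment": "production"])
        )
        let listed = try await textract.listTagsForResource(.init(resourceARN: adapterArn))
        print(listed.tags ?? [:])
    }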

" + } + }, + "com.amazonaws.textract#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.textract#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that specifies the resource to be tagged.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.textract#TagMap", + "traits": { + "smithy.api#documentation": "

A set of tags (key-value pairs) that you want to assign to the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, "com.amazonaws.textract#TextType": { "type": "enum", "members": { @@ -3337,9 +4693,27 @@ { "target": "com.amazonaws.textract#AnalyzeID" }, + { + "target": "com.amazonaws.textract#CreateAdapter" + }, + { + "target": "com.amazonaws.textract#CreateAdapterVersion" + }, + { + "target": "com.amazonaws.textract#DeleteAdapter" + }, + { + "target": "com.amazonaws.textract#DeleteAdapterVersion" + }, { "target": "com.amazonaws.textract#DetectDocumentText" }, + { + "target": "com.amazonaws.textract#GetAdapter" + }, + { + "target": "com.amazonaws.textract#GetAdapterVersion" + }, { "target": "com.amazonaws.textract#GetDocumentAnalysis" }, @@ -3355,6 +4729,15 @@ { "target": "com.amazonaws.textract#GetLendingAnalysisSummary" }, + { + "target": "com.amazonaws.textract#ListAdapters" + }, + { + "target": "com.amazonaws.textract#ListAdapterVersions" + }, + { + "target": "com.amazonaws.textract#ListTagsForResource" + }, { "target": "com.amazonaws.textract#StartDocumentAnalysis" }, @@ -3366,6 +4749,15 @@ }, { "target": "com.amazonaws.textract#StartLendingAnalysis" + }, + { + "target": "com.amazonaws.textract#TagResource" + }, + { + "target": "com.amazonaws.textract#UntagResource" + }, + { + "target": "com.amazonaws.textract#UpdateAdapter" } ], "traits": { @@ -4310,6 +5702,200 @@ "smithy.api#error": "client" } }, + "com.amazonaws.textract#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.textract#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes any tags with the specified keys from the specified resource.

" + } + }, + "com.amazonaws.textract#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.textract#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that specifies the resource to be untagged.

", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.textract#TagKeyList", + "traits": { + "smithy.api#documentation": "

Specifies the tags to be removed from the resource specified by the ResourceARN.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#UpdateAdapter": { + "type": "operation", + "input": { + "target": "com.amazonaws.textract#UpdateAdapterRequest" + }, + "output": { + "target": "com.amazonaws.textract#UpdateAdapterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.textract#AccessDeniedException" + }, + { + "target": "com.amazonaws.textract#ConflictException" + }, + { + "target": "com.amazonaws.textract#InternalServerError" + }, + { + "target": "com.amazonaws.textract#InvalidParameterException" + }, + { + "target": "com.amazonaws.textract#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.textract#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.textract#ThrottlingException" + }, + { + "target": "com.amazonaws.textract#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the configuration for an adapter. FeatureTypes configurations cannot be updated. At least one new parameter must be specified as an argument.
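Only the mutable settings (name, description, auto-update) can be changed here; feature types are fixed at creation. A sketch of turning automatic updates off, again assuming the Soto-generated names used above.

    import SotoTextract

    // Sketch: disable automatic updates for an existing adapter.
    func disableAutoUpdate(textract: Textract, adapterId: String) async throws {
        let response = try await textract.updateAdapter(
            .init(adapterId: adapterId, autoUpdate: .disabled)
        )
        print("AutoUpdate is now:", String(describing: response.autoUpdate))
    }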

" + } + }, + "com.amazonaws.textract#UpdateAdapterRequest": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter that will be updated.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.textract#AdapterDescription", + "traits": { + "smithy.api#documentation": "

The new description to be applied to the adapter.

" + } + }, + "AdapterName": { + "target": "com.amazonaws.textract#AdapterName", + "traits": { + "smithy.api#documentation": "

The new name to be applied to the adapter.

" + } + }, + "AutoUpdate": { + "target": "com.amazonaws.textract#AutoUpdate", + "traits": { + "smithy.api#documentation": "

The new auto-update status to be applied to the adapter.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.textract#UpdateAdapterResponse": { + "type": "structure", + "members": { + "AdapterId": { + "target": "com.amazonaws.textract#AdapterId", + "traits": { + "smithy.api#documentation": "

A string containing a unique ID for the adapter that has been updated.

" + } + }, + "AdapterName": { + "target": "com.amazonaws.textract#AdapterName", + "traits": { + "smithy.api#documentation": "

A string containing the name of the adapter that has been updated.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.textract#DateTime", + "traits": { + "smithy.api#documentation": "

An object specifying the creation time of the adapter that has been updated.

" + } + }, + "Description": { + "target": "com.amazonaws.textract#AdapterDescription", + "traits": { + "smithy.api#documentation": "

A string containing the description of the adapter that has been updated.

" + } + }, + "FeatureTypes": { + "target": "com.amazonaws.textract#FeatureTypes", + "traits": { + "smithy.api#documentation": "

List of the targeted feature types for the updated adapter.

" + } + }, + "AutoUpdate": { + "target": "com.amazonaws.textract#AutoUpdate", + "traits": { + "smithy.api#documentation": "

The auto-update status of the adapter that has been updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.textract#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.textract#String" + }, + "Code": { + "target": "com.amazonaws.textract#String" + } + }, + "traits": { + "smithy.api#documentation": "

Indicates that a request was not valid. Check the request for proper formatting.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.textract#ValueType": { "type": "enum", "members": { diff --git a/models/transcribe.json b/models/transcribe.json index 5a676378c8..53fc340feb 100644 --- a/models/transcribe.json +++ b/models/transcribe.json @@ -3405,6 +3405,12 @@ "traits": { "smithy.api#enumValue": "webm" } + }, + "M4A": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m4a" + } } } }, diff --git a/models/transfer.json b/models/transfer.json index aae1322008..381e997ce7 100644 --- a/models/transfer.json +++ b/models/transfer.json @@ -5779,7 +5779,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 512 + "max": 4096 }, "smithy.api#pattern": "^[\\x09-\\x0D\\x20-\\x7E]*$" } @@ -5789,7 +5789,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 512 + "max": 4096 }, "smithy.api#pattern": "^[\\x09-\\x0D\\x20-\\x7E]*$" } @@ -6385,7 +6385,7 @@ "TrustedHostKeys": { "target": "com.amazonaws.transfer#SftpConnectorTrustedHostKeyList", "traits": { - "smithy.api#documentation": "

The public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.

The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element. Specify only the <key type> and <body base64>: do not enter the <comment> portion of the key.

For the trusted host key, Transfer Family accepts RSA and ECDSA keys.

  • For RSA keys, the key type is ssh-rsa.
  • For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.

" + "smithy.api#documentation": "

The public portion of the host key, or keys, that are used to identify the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.

The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element. Specify only the <key type> and <body base64>: do not enter the <comment> portion of the key.

For the trusted host key, Transfer Family accepts RSA and ECDSA keys.

  • For RSA keys, the string is ssh-rsa.
  • For ECDSA keys, the string is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.

" } } }, diff --git a/models/workspaces.json b/models/workspaces.json index ec356301e0..fef5b714a9 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -164,6 +164,35 @@ } } }, + "com.amazonaws.workspaces#ApplicationAssociatedResourceType": { + "type": "enum", + "members": { + "WORKSPACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WORKSPACE" + } + }, + "BUNDLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BUNDLE" + } + }, + "IMAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IMAGE" + } + } + } + }, + "com.amazonaws.workspaces#ApplicationAssociatedResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#ApplicationAssociatedResourceType" + } + }, "com.amazonaws.workspaces#ApplicationList": { "type": "list", "member": { @@ -176,6 +205,70 @@ } } }, + "com.amazonaws.workspaces#ApplicationNotSupportedException": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The specified application is not supported.

", + "smithy.api#error": "client" + } + }, + "com.amazonaws.workspaces#ApplicationResourceAssociation": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId", + "traits": { + "smithy.api#documentation": "

The identifier of the application.

" + } + }, + "AssociatedResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the associated resource.

" + } + }, + "AssociatedResourceType": { + "target": "com.amazonaws.workspaces#ApplicationAssociatedResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the associated resource.

" + } + }, + "Created": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association was created.

" + } + }, + "LastUpdatedTime": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association status was last updated.

" + } + }, + "State": { + "target": "com.amazonaws.workspaces#AssociationState", + "traits": { + "smithy.api#documentation": "

The status of the application resource association.

" + } + }, + "StateReason": { + "target": "com.amazonaws.workspaces#AssociationStateReason", + "traits": { + "smithy.api#documentation": "

The reason the association deployment failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the association between an application and an application resource.

" + } + }, + "com.amazonaws.workspaces#ApplicationResourceAssociationList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#ApplicationResourceAssociation" + } + }, "com.amazonaws.workspaces#AssociateConnectionAlias": { "type": "operation", "input": { @@ -305,6 +398,200 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#AssociateWorkspaceApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#AssociateWorkspaceApplicationRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#AssociateWorkspaceApplicationResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#ApplicationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ComputeNotCompatibleException" + }, + { + "target": "com.amazonaws.workspaces#IncompatibleApplicationsException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperatingSystemNotCompatibleException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceAlreadyExistsException" + }, + { + "target": "com.amazonaws.workspaces#ResourceInUseException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates the specified application with the specified WorkSpace.
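The same Soto pattern applies on the WorkSpaces side. A hedged sketch using the SotoWorkSpaces target follows: the request members come from the shape below, while the association fields are assumed to mirror the ApplicationResourceAssociation and BundleResourceAssociation structures defined above.

    import SotoWorkSpaces

    // Sketch: attach an application to a WorkSpace and report the association state.
    // The `state` member on the returned association is an assumption based on the
    // parallel association structures in this model.
    func associateApplication(workSpaces: WorkSpaces, workspaceId: String, applicationId: String) async throws {
        let result = try await workSpaces.associateWorkspaceApplication(
            .init(applicationId: applicationId, workspaceId: workspaceId)
        )
        print("Association state:", String(describing: result.association?.state))
    }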

" + } + }, + "com.amazonaws.workspaces#AssociateWorkspaceApplicationRequest": { + "type": "structure", + "members": { + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpace.

", + "smithy.api#required": {} + } + }, + "ApplicationId": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId", + "traits": { + "smithy.api#documentation": "

The identifier of the application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#AssociateWorkspaceApplicationResult": { + "type": "structure", + "members": { + "Association": { + "target": "com.amazonaws.workspaces#WorkspaceResourceAssociation", + "traits": { + "smithy.api#documentation": "

Information about the association between the specified WorkSpace and the specified application.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#AssociationErrorCode": { + "type": "enum", + "members": { + "INSUFFICIENT_DISKSPACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ValidationError.InsufficientDiskSpace" + } + }, + "INSUFFICIENT_MEMORY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ValidationError.InsufficientMemory" + } + }, + "UNSUPPORTED_OPERATING_SYSTEM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ValidationError.UnsupportedOperatingSystem" + } + }, + "INTERNAL_SERVER_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeploymentError.InternalServerError" + } + }, + "WORKSPACE_UNREACHABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeploymentError.WorkspaceUnreachable" + } + } + } + }, + "com.amazonaws.workspaces#AssociationState": { + "type": "enum", + "members": { + "PENDING_INSTALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_INSTALL" + } + }, + "PENDING_INSTALL_DEPLOYMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_INSTALL_DEPLOYMENT" + } + }, + "PENDING_UNINSTALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_UNINSTALL" + } + }, + "PENDING_UNINSTALL_DEPLOYMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_UNINSTALL_DEPLOYMENT" + } + }, + "INSTALLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSTALLING" + } + }, + "UNINSTALLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNINSTALLING" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "REMOVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REMOVED" + } + } + } + }, + "com.amazonaws.workspaces#AssociationStateReason": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.workspaces#AssociationErrorCode", + "traits": { + "smithy.api#documentation": "

The error code of the association deployment failure.

" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.workspaces#String2048", + "traits": { + "smithy.api#documentation": "

The error message of the association deployment failure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the reason that the association deployment failed, including the error code and error message.

" + } + }, "com.amazonaws.workspaces#AssociationStatus": { "type": "enum", "members": { @@ -407,6 +694,23 @@ "com.amazonaws.workspaces#BooleanObject": { "type": "boolean" }, + "com.amazonaws.workspaces#BundleAssociatedResourceType": { + "type": "enum", + "members": { + "APPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPLICATION" + } + } + } + }, + "com.amazonaws.workspaces#BundleAssociatedResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#BundleAssociatedResourceType" + } + }, "com.amazonaws.workspaces#BundleId": { "type": "string", "traits": { @@ -434,6 +738,62 @@ "com.amazonaws.workspaces#BundleOwner": { "type": "string" }, + "com.amazonaws.workspaces#BundleResourceAssociation": { + "type": "structure", + "members": { + "AssociatedResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the associated resource.

" + } + }, + "AssociatedResourceType": { + "target": "com.amazonaws.workspaces#BundleAssociatedResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the associated resource.

" + } + }, + "BundleId": { + "target": "com.amazonaws.workspaces#BundleId", + "traits": { + "smithy.api#documentation": "

The identifier of the bundle.

" + } + }, + "Created": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association was created.

" + } + }, + "LastUpdatedTime": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association status was last updated.

" + } + }, + "State": { + "target": "com.amazonaws.workspaces#AssociationState", + "traits": { + "smithy.api#documentation": "

The status of the bundle resource association.

" + } + }, + "StateReason": { + "target": "com.amazonaws.workspaces#AssociationStateReason", + "traits": { + "smithy.api#documentation": "

The reason the association deployment failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the association between an application and a bundle resource.

" + } + }, + "com.amazonaws.workspaces#BundleResourceAssociationList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#BundleResourceAssociation" + } + }, "com.amazonaws.workspaces#BundleType": { "type": "enum", "members": { @@ -696,6 +1056,20 @@ } } }, + "com.amazonaws.workspaces#ComputeList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#Compute" + } + }, + "com.amazonaws.workspaces#ComputeNotCompatibleException": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The compute type of the WorkSpace is not compatible with the application.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.workspaces#ComputeType": { "type": "structure", "members": { @@ -1715,7 +2089,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n

The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

\n
" + "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n
  • The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

  • PCoIP is only available for Windows bundles.

\n
" } }, "com.amazonaws.workspaces#CreateWorkspacesRequest": { @@ -2324,24 +2698,237 @@ } } }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.workspaces#DeleteWorkspaceImageResult": { - "type": "structure", - "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DeleteWorkspaceImageResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#DeployWorkspaceApplications": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DeployWorkspaceApplicationsRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DeployWorkspaceApplicationsResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#IncompatibleApplicationsException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceInUseException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Deploys associated applications to the specified WorkSpace.

" + } + }, + "com.amazonaws.workspaces#DeployWorkspaceApplicationsRequest": { + "type": "structure", + "members": { + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpace.

", + "smithy.api#required": {} + } + }, + "Force": { + "target": "com.amazonaws.workspaces#BooleanObject", + "traits": { + "smithy.api#documentation": "

Indicates whether the force flag is applied for the specified WorkSpace. When the force flag is enabled, \n it allows previously failed deployments to be retried.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DeployWorkspaceApplicationsResult": { + "type": "structure", + "members": { + "Deployment": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationDeployment", + "traits": { + "smithy.api#documentation": "

The list of deployed associations and information about them.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#DeregisterWorkspaceDirectory": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#InvalidResourceStateException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Deregisters the specified directory. This operation is asynchronous and returns before\n the WorkSpace directory is deregistered. If any WorkSpaces are registered to this\n directory, you must remove them before you can deregister the directory.

\n \n

Simple AD and AD Connector are made available to you free of charge to use with\n WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector\n directory for 30 consecutive days, this directory will be automatically deregistered for\n use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing\n terms.

\n

To delete empty directories, see Delete the\n Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector\n directory, you can always create a new one when you want to start using WorkSpaces\n again.

\n
" + } + }, + "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.workspaces#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier of the directory. If any WorkSpaces are registered to this directory, you\n must remove them before you deregister the directory, or you will receive an\n OperationNotSupportedException error.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#DescribeAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DescribeAccountRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DescribeAccountResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for\n the specified account.

" + } + }, + "com.amazonaws.workspaces#DescribeAccountModifications": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DescribeAccountModificationsRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DescribeAccountModificationsResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves a list that describes modifications to the configuration of Bring Your Own\n License (BYOL) for the specified account.

" + } + }, + "com.amazonaws.workspaces#DescribeAccountModificationsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.workspaces#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated,\n provide this token to receive the next set of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DescribeAccountModificationsResult": { + "type": "structure", + "members": { + "AccountModifications": { + "target": "com.amazonaws.workspaces#AccountModificationList", + "traits": { + "smithy.api#documentation": "

The list of modifications to the configuration of BYOL.

" + } + }, + "NextToken": { + "target": "com.amazonaws.workspaces#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#DescribeAccountRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DescribeAccountResult": { + "type": "structure", + "members": { + "DedicatedTenancySupport": { + "target": "com.amazonaws.workspaces#DedicatedTenancySupportResultEnum", + "traits": { + "smithy.api#documentation": "

The status of BYOL (whether BYOL is enabled or disabled).

" + } + }, + "DedicatedTenancyManagementCidrRange": { + "target": "com.amazonaws.workspaces#DedicatedTenancyManagementCidrRange", + "traits": { + "smithy.api#documentation": "

The IP address range, specified as an IPv4 CIDR block, used for the management network\n interface.

\n

The management network interface is connected to a secure Amazon WorkSpaces management\n network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces\n clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" + } + } + }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.workspaces#DeregisterWorkspaceDirectory": { + "com.amazonaws.workspaces#DescribeApplicationAssociations": { "type": "operation", "input": { - "target": "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryRequest" + "target": "com.amazonaws.workspaces#DescribeApplicationAssociationsRequest" }, "output": { - "target": "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryResult" + "target": "com.amazonaws.workspaces#DescribeApplicationAssociationsResult" }, "errors": [ { @@ -2350,9 +2937,6 @@ { "target": "com.amazonaws.workspaces#InvalidParameterValuesException" }, - { - "target": "com.amazonaws.workspaces#InvalidResourceStateException" - }, { "target": "com.amazonaws.workspaces#OperationNotSupportedException" }, @@ -2361,16 +2945,40 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters the specified directory. This operation is asynchronous and returns before\n the WorkSpace directory is deregistered. If any WorkSpaces are registered to this\n directory, you must remove them before you can deregister the directory.

\n \n

Simple AD and AD Connector are made available to you free of charge to use with\n WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector\n directory for 30 consecutive days, this directory will be automatically deregistered for\n use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing\n terms.

\n

To delete empty directories, see Delete the\n Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector\n directory, you can always create a new one when you want to start using WorkSpaces\n again.

\n
" + "smithy.api#documentation": "

Describes the associations between the application and the specified associated resources.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } } }, - "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryRequest": { + "com.amazonaws.workspaces#DescribeApplicationAssociationsRequest": { "type": "structure", "members": { - "DirectoryId": { - "target": "com.amazonaws.workspaces#DirectoryId", + "MaxResults": { + "target": "com.amazonaws.workspaces#Limit", "traits": { - "smithy.api#documentation": "

The identifier of the directory. If any WorkSpaces are registered to this directory, you\n must remove them before you deregister the directory, or you will receive an\n OperationNotSupportedException error.

", + "smithy.api#documentation": "

The maximum number of associations to return.

" + } + }, + "NextToken": { + "target": "com.amazonaws.workspaces#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + } + }, + "ApplicationId": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId", + "traits": { + "smithy.api#documentation": "

The identifier of the specified application.

", + "smithy.api#required": {} + } + }, + "AssociatedResourceTypes": { + "target": "com.amazonaws.workspaces#ApplicationAssociatedResourceTypeList", + "traits": { + "smithy.api#documentation": "

The resource types of the associated resources.

", "smithy.api#required": {} } } @@ -2379,54 +2987,100 @@ "smithy.api#input": {} } }, - "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryResult": { + "com.amazonaws.workspaces#DescribeApplicationAssociationsResult": { "type": "structure", - "members": {}, + "members": { + "Associations": { + "target": "com.amazonaws.workspaces#ApplicationResourceAssociationList", + "traits": { + "smithy.api#documentation": "

List of associations and information about them.

" + } + }, + "NextToken": { + "target": "com.amazonaws.workspaces#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + } + } + }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.workspaces#DescribeAccount": { + "com.amazonaws.workspaces#DescribeApplications": { "type": "operation", "input": { - "target": "com.amazonaws.workspaces#DescribeAccountRequest" + "target": "com.amazonaws.workspaces#DescribeApplicationsRequest" }, "output": { - "target": "com.amazonaws.workspaces#DescribeAccountResult" + "target": "com.amazonaws.workspaces#DescribeApplicationsResult" }, "errors": [ { "target": "com.amazonaws.workspaces#AccessDeniedException" - } - ], - "traits": { - "smithy.api#documentation": "

Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for\n the specified account.

" - } - }, - "com.amazonaws.workspaces#DescribeAccountModifications": { - "type": "operation", - "input": { - "target": "com.amazonaws.workspaces#DescribeAccountModificationsRequest" - }, - "output": { - "target": "com.amazonaws.workspaces#DescribeAccountModificationsResult" - }, - "errors": [ + }, { - "target": "com.amazonaws.workspaces#AccessDeniedException" + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" } ], "traits": { - "smithy.api#documentation": "

Retrieves a list that describes modifications to the configuration of Bring Your Own\n License (BYOL) for the specified account.

" + "smithy.api#documentation": "

Describes the specified applications by filtering based on their compute types, license availability, operating systems, and owners.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } } }, - "com.amazonaws.workspaces#DescribeAccountModificationsRequest": { + "com.amazonaws.workspaces#DescribeApplicationsRequest": { "type": "structure", "members": { + "ApplicationIds": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationIdList", + "traits": { + "smithy.api#documentation": "

The identifiers of one or more applications.

" + } + }, + "ComputeTypeNames": { + "target": "com.amazonaws.workspaces#ComputeList", + "traits": { + "smithy.api#documentation": "

The compute types supported by the applications.

" + } + }, + "LicenseType": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationLicenseType", + "traits": { + "smithy.api#documentation": "

The license availability for the applications.

" + } + }, + "OperatingSystemNames": { + "target": "com.amazonaws.workspaces#OperatingSystemNameList", + "traits": { + "smithy.api#documentation": "

The operating systems supported by the applications.

" + } + }, + "Owner": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationOwner", + "traits": { + "smithy.api#documentation": "

The owner of the applications.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.workspaces#Limit", + "traits": { + "smithy.api#documentation": "

The maximum number of applications to return.

" + } + }, "NextToken": { "target": "com.amazonaws.workspaces#PaginationToken", "traits": { - "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated,\n provide this token to receive the next set of results.

" + "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" } } }, @@ -2434,19 +3088,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.workspaces#DescribeAccountModificationsResult": { + "com.amazonaws.workspaces#DescribeApplicationsResult": { "type": "structure", "members": { - "AccountModifications": { - "target": "com.amazonaws.workspaces#AccountModificationList", + "Applications": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationList", "traits": { - "smithy.api#documentation": "

The list of modifications to the configuration of BYOL.

" + "smithy.api#documentation": "

List of information about the specified applications.

" } }, "NextToken": { "target": "com.amazonaws.workspaces#PaginationToken", "traits": { - "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.

" + "smithy.api#documentation": "

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" } } }, @@ -2454,26 +3108,61 @@ "smithy.api#output": {} } }, - "com.amazonaws.workspaces#DescribeAccountRequest": { - "type": "structure", - "members": {}, + "com.amazonaws.workspaces#DescribeBundleAssociations": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DescribeBundleAssociationsRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DescribeBundleAssociationsResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Describes the associations between the applications and the specified bundle.

" } }, - "com.amazonaws.workspaces#DescribeAccountResult": { + "com.amazonaws.workspaces#DescribeBundleAssociationsRequest": { "type": "structure", "members": { - "DedicatedTenancySupport": { - "target": "com.amazonaws.workspaces#DedicatedTenancySupportResultEnum", + "BundleId": { + "target": "com.amazonaws.workspaces#BundleId", "traits": { - "smithy.api#documentation": "

The status of BYOL (whether BYOL is enabled or disabled).

" + "smithy.api#documentation": "

The identifier of the bundle.

", + "smithy.api#required": {} } }, - "DedicatedTenancyManagementCidrRange": { - "target": "com.amazonaws.workspaces#DedicatedTenancyManagementCidrRange", + "AssociatedResourceTypes": { + "target": "com.amazonaws.workspaces#BundleAssociatedResourceTypeList", "traits": { - "smithy.api#documentation": "

The IP address range, specified as an IPv4 CIDR block, used for the management network\n interface.

\n

The management network interface is connected to a secure Amazon WorkSpaces management\n network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces\n clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" + "smithy.api#documentation": "

The resource types of the associated resources.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DescribeBundleAssociationsResult": { + "type": "structure", + "members": { + "Associations": { + "target": "com.amazonaws.workspaces#BundleResourceAssociationList", + "traits": { + "smithy.api#documentation": "

List of information about the specified associations.

" } } }, @@ -2839,6 +3528,68 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#DescribeImageAssociations": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DescribeImageAssociationsRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DescribeImageAssociationsResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes the associations between the applications and the specified image.

" + } + }, + "com.amazonaws.workspaces#DescribeImageAssociationsRequest": { + "type": "structure", + "members": { + "ImageId": { + "target": "com.amazonaws.workspaces#WorkspaceImageId", + "traits": { + "smithy.api#documentation": "

The identifier of the image.

", + "smithy.api#required": {} + } + }, + "AssociatedResourceTypes": { + "target": "com.amazonaws.workspaces#ImageAssociatedResourceTypeList", + "traits": { + "smithy.api#documentation": "

The resource types of the associated resources.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DescribeImageAssociationsResult": { + "type": "structure", + "members": { + "Associations": { + "target": "com.amazonaws.workspaces#ImageResourceAssociationList", + "traits": { + "smithy.api#documentation": "

List of information about the specified associations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workspaces#DescribeIpGroups": { "type": "operation", "input": { @@ -2877,7 +3628,59 @@ "MaxResults": { "target": "com.amazonaws.workspaces#Limit", "traits": { - "smithy.api#documentation": "

The maximum number of items to return.

" + "smithy.api#documentation": "

The maximum number of items to return.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DescribeIpGroupsResult": { + "type": "structure", + "members": { + "Result": { + "target": "com.amazonaws.workspaces#WorkspacesIpGroupsList", + "traits": { + "smithy.api#documentation": "

Information about the IP access control groups.

" + } + }, + "NextToken": { + "target": "com.amazonaws.workspaces#PaginationToken", + "traits": { + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#DescribeTags": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DescribeTagsRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DescribeTagsResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes the specified tags for the specified WorkSpaces resource.

" + } + }, + "com.amazonaws.workspaces#DescribeTagsRequest": { + "type": "structure", + "members": { + "ResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpaces resource. The supported resource types are WorkSpaces,\n registered directories, images, custom bundles, IP access control groups, and connection\n aliases.

", + "smithy.api#required": {} } } }, @@ -2885,19 +3688,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.workspaces#DescribeIpGroupsResult": { + "com.amazonaws.workspaces#DescribeTagsResult": { "type": "structure", "members": { - "Result": { - "target": "com.amazonaws.workspaces#WorkspacesIpGroupsList", - "traits": { - "smithy.api#documentation": "

Information about the IP access control groups.

" - } - }, - "NextToken": { - "target": "com.amazonaws.workspaces#PaginationToken", + "TagList": { + "target": "com.amazonaws.workspaces#TagList", "traits": { - "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.

" + "smithy.api#documentation": "

The tags.

" } } }, @@ -2905,30 +3702,46 @@ "smithy.api#output": {} } }, - "com.amazonaws.workspaces#DescribeTags": { + "com.amazonaws.workspaces#DescribeWorkspaceAssociations": { "type": "operation", "input": { - "target": "com.amazonaws.workspaces#DescribeTagsRequest" + "target": "com.amazonaws.workspaces#DescribeWorkspaceAssociationsRequest" }, "output": { - "target": "com.amazonaws.workspaces#DescribeTagsResult" + "target": "com.amazonaws.workspaces#DescribeWorkspaceAssociationsResult" }, "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, { "target": "com.amazonaws.workspaces#ResourceNotFoundException" } ], "traits": { - "smithy.api#documentation": "

Describes the specified tags for the specified WorkSpaces resource.

" + "smithy.api#documentation": "

Describes the associations between applications and the specified WorkSpace.

" } }, - "com.amazonaws.workspaces#DescribeTagsRequest": { + "com.amazonaws.workspaces#DescribeWorkspaceAssociationsRequest": { "type": "structure", "members": { - "ResourceId": { - "target": "com.amazonaws.workspaces#NonEmptyString", + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", "traits": { - "smithy.api#documentation": "

The identifier of the WorkSpaces resource. The supported resource types are WorkSpaces,\n registered directories, images, custom bundles, IP access control groups, and connection\n aliases.

", + "smithy.api#documentation": "

The identifier of the WorkSpace.

", + "smithy.api#required": {} + } + }, + "AssociatedResourceTypes": { + "target": "com.amazonaws.workspaces#WorkSpaceAssociatedResourceTypeList", + "traits": { + "smithy.api#documentation": "

The resource types of the associated resources.

", "smithy.api#required": {} } } @@ -2937,13 +3750,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.workspaces#DescribeTagsResult": { + "com.amazonaws.workspaces#DescribeWorkspaceAssociationsResult": { "type": "structure", "members": { - "TagList": { - "target": "com.amazonaws.workspaces#TagList", + "Associations": { + "target": "com.amazonaws.workspaces#WorkspaceResourceAssociationList", "traits": { - "smithy.api#documentation": "

The tags.

" + "smithy.api#documentation": "

List of information about the specified associations.

" } } }, @@ -3577,6 +4390,71 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#DisassociateWorkspaceApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspaces#DisassociateWorkspaceApplicationRequest" + }, + "output": { + "target": "com.amazonaws.workspaces#DisassociateWorkspaceApplicationResult" + }, + "errors": [ + { + "target": "com.amazonaws.workspaces#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspaces#InvalidParameterValuesException" + }, + { + "target": "com.amazonaws.workspaces#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.workspaces#ResourceInUseException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates the specified application from a WorkSpace.

" + } + }, + "com.amazonaws.workspaces#DisassociateWorkspaceApplicationRequest": { + "type": "structure", + "members": { + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpace.

", + "smithy.api#required": {} + } + }, + "ApplicationId": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId", + "traits": { + "smithy.api#documentation": "

The identifier of the application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#DisassociateWorkspaceApplicationResult": { + "type": "structure", + "members": { + "Association": { + "target": "com.amazonaws.workspaces#WorkspaceResourceAssociation", + "traits": { + "smithy.api#documentation": "

Information about the targeted association.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workspaces#DnsIpAddresses": { "type": "list", "member": { @@ -3606,7 +4484,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides in-depth details about the error. These details include the \n possible causes of the errors and troubleshooting information.

" + "smithy.api#documentation": "

Describes in-depth details about the error. These details include the \n possible causes of the error and troubleshooting information.

" } }, "com.amazonaws.workspaces#ErrorDetailsList": { @@ -3744,6 +4622,23 @@ "smithy.api#documentation": "

Describes a WorkSpace that could not be rebooted (RebootWorkspaces),\n rebuilt (RebuildWorkspaces), restored (RestoreWorkspace), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).

" } }, + "com.amazonaws.workspaces#ImageAssociatedResourceType": { + "type": "enum", + "members": { + "APPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPLICATION" + } + } + } + }, + "com.amazonaws.workspaces#ImageAssociatedResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#ImageAssociatedResourceType" + } + }, "com.amazonaws.workspaces#ImagePermission": { "type": "structure", "members": { @@ -3764,6 +4659,62 @@ "target": "com.amazonaws.workspaces#ImagePermission" } }, + "com.amazonaws.workspaces#ImageResourceAssociation": { + "type": "structure", + "members": { + "AssociatedResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the associated resource.

" + } + }, + "AssociatedResourceType": { + "target": "com.amazonaws.workspaces#ImageAssociatedResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the associated resource.

" + } + }, + "Created": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association was created.

" + } + }, + "LastUpdatedTime": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association status was last updated.

" + } + }, + "ImageId": { + "target": "com.amazonaws.workspaces#WorkspaceImageId", + "traits": { + "smithy.api#documentation": "

The identifier of the image.

" + } + }, + "State": { + "target": "com.amazonaws.workspaces#AssociationState", + "traits": { + "smithy.api#documentation": "

The status of the image resource association.

" + } + }, + "StateReason": { + "target": "com.amazonaws.workspaces#AssociationStateReason", + "traits": { + "smithy.api#documentation": "

The reason the association deployment failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the association between an application and an image resource.

" + } + }, + "com.amazonaws.workspaces#ImageResourceAssociationList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#ImageResourceAssociation" + } + }, "com.amazonaws.workspaces#ImageType": { "type": "enum", "members": { @@ -3996,6 +4947,14 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#IncompatibleApplicationsException": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The specified application is not compatible with the resource.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.workspaces#InvalidParameterValuesException": { "type": "structure", "members": { @@ -4932,63 +5891,148 @@ { "target": "com.amazonaws.workspaces#OperationNotSupportedException" }, - { - "target": "com.amazonaws.workspaces#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Sets the state of the specified WorkSpace.

\n

To maintain a WorkSpace without being interrupted, set the WorkSpace state to\n ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to\n reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not\n stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE\n state.

" - } - }, - "com.amazonaws.workspaces#ModifyWorkspaceStateRequest": { - "type": "structure", - "members": { - "WorkspaceId": { - "target": "com.amazonaws.workspaces#WorkspaceId", + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Sets the state of the specified WorkSpace.

\n

To maintain a WorkSpace without being interrupted, set the WorkSpace state to\n ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to\n reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not\n stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE\n state.

" + } + }, + "com.amazonaws.workspaces#ModifyWorkspaceStateRequest": { + "type": "structure", + "members": { + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpace.

", + "smithy.api#required": {} + } + }, + "WorkspaceState": { + "target": "com.amazonaws.workspaces#TargetWorkspaceState", + "traits": { + "smithy.api#documentation": "

The WorkSpace state.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspaces#ModifyWorkspaceStateResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspaces#NonEmptyString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.workspaces#OperatingSystem": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.workspaces#OperatingSystemType", + "traits": { + "smithy.api#documentation": "

The operating system.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The operating system that the image is running.

" + } + }, + "com.amazonaws.workspaces#OperatingSystemName": { + "type": "enum", + "members": { + "AMAZON_LINUX_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMAZON_LINUX_2" + } + }, + "UBUNTU_18_04": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UBUNTU_18_04" + } + }, + "UBUNTU_20_04": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UBUNTU_20_04" + } + }, + "UBUNTU_22_04": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UBUNTU_22_04" + } + }, + "UNKNOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN" + } + }, + "WINDOWS_10": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WINDOWS_10" + } + }, + "WINDOWS_11": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WINDOWS_11" + } + }, + "WINDOWS_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WINDOWS_7" + } + }, + "WINDOWS_SERVER_2016": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WINDOWS_SERVER_2016" + } + }, + "WINDOWS_SERVER_2019": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The identifier of the WorkSpace.

", - "smithy.api#required": {} + "smithy.api#enumValue": "WINDOWS_SERVER_2019" } }, - "WorkspaceState": { - "target": "com.amazonaws.workspaces#TargetWorkspaceState", + "WINDOWS_SERVER_2022": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The WorkSpace state.

", - "smithy.api#required": {} + "smithy.api#enumValue": "WINDOWS_SERVER_2022" } } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.workspaces#ModifyWorkspaceStateResult": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} } }, - "com.amazonaws.workspaces#NonEmptyString": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1 - } + "com.amazonaws.workspaces#OperatingSystemNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#OperatingSystemName" } }, - "com.amazonaws.workspaces#OperatingSystem": { + "com.amazonaws.workspaces#OperatingSystemNotCompatibleException": { "type": "structure", - "members": { - "Type": { - "target": "com.amazonaws.workspaces#OperatingSystemType", - "traits": { - "smithy.api#documentation": "

The operating system.

" - } - } - }, + "members": {}, "traits": { - "smithy.api#documentation": "

The operating system that the image is running.

" + "smithy.api#documentation": "

The operating system of the WorkSpace is not compatible with the application.

", + "smithy.api#error": "client" } }, "com.amazonaws.workspaces#OperatingSystemType": { @@ -5470,6 +6514,24 @@ } } }, + "com.amazonaws.workspaces#ResourceInUseException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspaces#ExceptionMessage" + }, + "ResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The ID of the resource that is in use.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The specified resource is currently in use.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.workspaces#ResourceLimitExceededException": { "type": "structure", "members": { @@ -5991,6 +7053,15 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#String2048": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.workspaces#SubnetId": { "type": "string", "traits": { @@ -6559,6 +7630,175 @@ "com.amazonaws.workspaces#VolumeEncryptionKey": { "type": "string" }, + "com.amazonaws.workspaces#WorkSpaceApplication": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId", + "traits": { + "smithy.api#documentation": "

The identifier of the application.

" + } + }, + "Created": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the application was created.

" + } + }, + "Description": { + "target": "com.amazonaws.workspaces#String2048", + "traits": { + "smithy.api#documentation": "

The description of the WorkSpace application.

" + } + }, + "LicenseType": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationLicenseType", + "traits": { + "smithy.api#documentation": "

The license availability for the application.

" + } + }, + "Name": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The name of the WorkSpace application.

" + } + }, + "Owner": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationOwner", + "traits": { + "smithy.api#documentation": "

The owner of the WorkSpace application.

" + } + }, + "State": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationState", + "traits": { + "smithy.api#documentation": "

The status of the WorkSpace application.

" + } + }, + "SupportedComputeTypeNames": { + "target": "com.amazonaws.workspaces#ComputeList", + "traits": { + "smithy.api#documentation": "

The supported compute types of the WorkSpace application.

" + } + }, + "SupportedOperatingSystemNames": { + "target": "com.amazonaws.workspaces#OperatingSystemNameList", + "traits": { + "smithy.api#documentation": "

The supported operating systems of the WorkSpace application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the WorkSpace application.

" + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationDeployment": { + "type": "structure", + "members": { + "Associations": { + "target": "com.amazonaws.workspaces#WorkspaceResourceAssociationList", + "traits": { + "smithy.api#documentation": "

The associations between the applications and the associated resources.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the WorkSpace application deployment.

" + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^wsa-[0-9a-z]{8,63}$" + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#WorkSpaceApplicationId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationLicenseType": { + "type": "enum", + "members": { + "LICENSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LICENSED" + } + }, + "UNLICENSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNLICENSED" + } + } + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#WorkSpaceApplication" + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationOwner": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\d{12}|AMAZON$" + } + }, + "com.amazonaws.workspaces#WorkSpaceApplicationState": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVAILABLE" + } + }, + "UNINSTALL_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNINSTALL_ONLY" + } + } + } + }, + "com.amazonaws.workspaces#WorkSpaceAssociatedResourceType": { + "type": "enum", + "members": { + "APPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPLICATION" + } + } + } + }, + "com.amazonaws.workspaces#WorkSpaceAssociatedResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#WorkSpaceAssociatedResourceType" + } + }, "com.amazonaws.workspaces#Workspace": { "type": "structure", "members": { @@ -7185,7 +8425,7 @@ "ErrorDetails": { "target": "com.amazonaws.workspaces#ErrorDetailsList", "traits": { - "smithy.api#documentation": "

The details of the error returned for the image.

" + "smithy.api#documentation": "

Additional details of the error returned for the image, including the \n possible causes of the errors and troubleshooting information.

" } } }, @@ -7532,6 +8772,12 @@ "traits": { "smithy.api#documentation": "

The protocol. For more information, see \n \n Protocols for Amazon WorkSpaces.

\n \n
  • Only available for WorkSpaces created with PCoIP bundles.

  • The Protocols property is case sensitive. Ensure you use PCOIP or WSP.

  • Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles \n (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn).

\n
" } + }, + "OperatingSystemName": { + "target": "com.amazonaws.workspaces#OperatingSystemName", + "traits": { + "smithy.api#documentation": "

The name of the operating system.

" + } } }, "traits": { @@ -7609,6 +8855,62 @@ } } }, + "com.amazonaws.workspaces#WorkspaceResourceAssociation": { + "type": "structure", + "members": { + "AssociatedResourceId": { + "target": "com.amazonaws.workspaces#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the associated resource.

" + } + }, + "AssociatedResourceType": { + "target": "com.amazonaws.workspaces#WorkSpaceAssociatedResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the associated resource.

" + } + }, + "Created": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association was created.

" + } + }, + "LastUpdatedTime": { + "target": "com.amazonaws.workspaces#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the association status was last updated.

" + } + }, + "State": { + "target": "com.amazonaws.workspaces#AssociationState", + "traits": { + "smithy.api#documentation": "

The status of the WorkSpace resource association.

" + } + }, + "StateReason": { + "target": "com.amazonaws.workspaces#AssociationStateReason", + "traits": { + "smithy.api#documentation": "

The reason the association deployment failed.

" + } + }, + "WorkspaceId": { + "target": "com.amazonaws.workspaces#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The identifier of the WorkSpace.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the association between an application and a WorkSpace resource.

" + } + }, + "com.amazonaws.workspaces#WorkspaceResourceAssociationList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspaces#WorkspaceResourceAssociation" + } + }, "com.amazonaws.workspaces#WorkspaceState": { "type": "enum", "members": { @@ -7776,6 +9078,9 @@ { "target": "com.amazonaws.workspaces#AssociateIpGroups" }, + { + "target": "com.amazonaws.workspaces#AssociateWorkspaceApplication" + }, { "target": "com.amazonaws.workspaces#AuthorizeIpRules" }, @@ -7830,6 +9135,9 @@ { "target": "com.amazonaws.workspaces#DeleteWorkspaceImage" }, + { + "target": "com.amazonaws.workspaces#DeployWorkspaceApplications" + }, { "target": "com.amazonaws.workspaces#DeregisterWorkspaceDirectory" }, @@ -7839,6 +9147,15 @@ { "target": "com.amazonaws.workspaces#DescribeAccountModifications" }, + { + "target": "com.amazonaws.workspaces#DescribeApplicationAssociations" + }, + { + "target": "com.amazonaws.workspaces#DescribeApplications" + }, + { + "target": "com.amazonaws.workspaces#DescribeBundleAssociations" + }, { "target": "com.amazonaws.workspaces#DescribeClientBranding" }, @@ -7854,12 +9171,18 @@ { "target": "com.amazonaws.workspaces#DescribeConnectionAliasPermissions" }, + { + "target": "com.amazonaws.workspaces#DescribeImageAssociations" + }, { "target": "com.amazonaws.workspaces#DescribeIpGroups" }, { "target": "com.amazonaws.workspaces#DescribeTags" }, + { + "target": "com.amazonaws.workspaces#DescribeWorkspaceAssociations" + }, { "target": "com.amazonaws.workspaces#DescribeWorkspaceBundles" }, @@ -7887,6 +9210,9 @@ { "target": "com.amazonaws.workspaces#DisassociateIpGroups" }, + { + "target": "com.amazonaws.workspaces#DisassociateWorkspaceApplication" + }, { "target": "com.amazonaws.workspaces#ImportClientBranding" },