From fd796c520f7417706600d466f3a354e68d017ccb Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Mon, 21 Sep 2020 11:09:14 -0700 Subject: [PATCH] Release v1.34.28 (2020-09-21) (#3550) Release v1.34.28 (2020-09-21) === ### Service Client Updates * `service/eventbridge`: Updates service API and documentation * `service/events`: Updates service API and documentation * Add support for Redshift Data API Targets * `service/glue`: Updates service API and documentation * Adding support to update multiple partitions of a table in a single request * `service/iotsitewise`: Updates service API and documentation * `service/rds`: Updates service documentation * Documentation updates for the RDS DescribeExportTasks API * `service/resource-groups`: Updates service documentation and paginators * `service/resourcegroupstaggingapi`: Updates service documentation * Documentation updates for the Resource Groups Tagging API. --- CHANGELOG.md | 16 + aws/endpoints/defaults.go | 7 + aws/version.go | 2 +- models/apis/eventbridge/2015-10-07/api-2.json | 46 +- .../apis/eventbridge/2015-10-07/docs-2.json | 39 +- models/apis/events/2015-10-07/api-2.json | 46 +- models/apis/events/2015-10-07/docs-2.json | 39 +- models/apis/glue/2017-03-31/api-2.json | 64 +++ models/apis/glue/2017-03-31/docs-2.json | 46 +- models/apis/iotsitewise/2019-12-02/api-2.json | 75 ++- .../apis/iotsitewise/2019-12-02/docs-2.json | 92 ++-- models/apis/rds/2014-10-31/docs-2.json | 34 +- .../resource-groups/2017-11-27/docs-2.json | 8 +- .../2017-11-27/paginators-1.json | 9 +- .../2017-01-26/docs-2.json | 2 +- models/endpoints/endpoints.json | 7 + service/cloudwatchevents/api.go | 129 ++++++ service/eventbridge/api.go | 129 ++++++ service/glue/api.go | 317 +++++++++++++ service/glue/glueiface/interface.go | 4 + service/iotsitewise/api.go | 434 +++++++++++++++--- .../iotsitewise/iotsitewiseiface/interface.go | 4 + service/rds/api.go | 79 +++- 
service/resourcegroups/api.go | 92 +++- service/resourcegroups/doc.go | 2 +- service/resourcegroupstaggingapi/doc.go | 18 +- 26 files changed, 1580 insertions(+), 160 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d912f2f901e..e32eba9d031 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.34.28 (2020-09-21) +=== + +### Service Client Updates +* `service/eventbridge`: Updates service API and documentation +* `service/events`: Updates service API and documentation + * Add support for Redshift Data API Targets +* `service/glue`: Updates service API and documentation + * Adding support to update multiple partitions of a table in a single request +* `service/iotsitewise`: Updates service API and documentation +* `service/rds`: Updates service documentation + * Documentation updates for the RDS DescribeExportTasks API +* `service/resource-groups`: Updates service documentation and paginators +* `service/resourcegroupstaggingapi`: Updates service documentation + * Documentation updates for the Resource Groups Tagging API. 
+ Release v1.34.27 (2020-09-18) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index adc1dbbd6ba..a184239a48c 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -2199,6 +2199,7 @@ var awsPartition = partition{ "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -5565,6 +5566,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "fips-ap-south-1": endpoint{ Hostname: "snowball-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ diff --git a/aws/version.go b/aws/version.go index af4cc1b00be..71c732eb307 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.34.27" +const SDKVersion = "1.34.28" diff --git a/models/apis/eventbridge/2015-10-07/api-2.json b/models/apis/eventbridge/2015-10-07/api-2.json index c8a7e3b5cf9..4d8f2b396fe 100644 --- a/models/apis/eventbridge/2015-10-07/api-2.json +++ b/models/apis/eventbridge/2015-10-07/api-2.json @@ -563,6 +563,18 @@ "EventSourceArn":{"shape":"String"} } }, + "Database":{ + "type":"string", + "max":64, + "min":1, + "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, + "DbUser":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, "DeactivateEventSourceRequest":{ "type":"structure", "required":["Name"], @@ -1218,6 +1230,27 @@ "max":512, "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+" }, + "RedshiftDataParameters":{ + "type":"structure", + "required":[ + "Database", + "Sql" + ], + "members":{ + "SecretManagerArn":{"shape":"RedshiftSecretManagerArn"}, + "Database":{"shape":"Database"}, + 
"DbUser":{"shape":"DbUser"}, + "Sql":{"shape":"Sql"}, + "StatementName":{"shape":"StatementName"}, + "WithEvent":{"shape":"Boolean"} + } + }, + "RedshiftSecretManagerArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(^arn:aws([a-z]|\\-)*:secretsmanager:[a-z0-9-.]+:.*)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, "RemovePermissionRequest":{ "type":"structure", "required":["StatementId"], @@ -1364,6 +1397,11 @@ "type":"string", "max":256 }, + "Sql":{ + "type":"string", + "max":100000, + "min":1 + }, "SqsParameters":{ "type":"structure", "members":{ @@ -1376,6 +1414,11 @@ "min":1, "pattern":"[a-zA-Z0-9-_]+" }, + "StatementName":{ + "type":"string", + "max":500, + "min":1 + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -1444,7 +1487,8 @@ "EcsParameters":{"shape":"EcsParameters"}, "BatchParameters":{"shape":"BatchParameters"}, "SqsParameters":{"shape":"SqsParameters"}, - "HttpParameters":{"shape":"HttpParameters"} + "HttpParameters":{"shape":"HttpParameters"}, + "RedshiftDataParameters":{"shape":"RedshiftDataParameters"} } }, "TargetArn":{ diff --git a/models/apis/eventbridge/2015-10-07/docs-2.json b/models/apis/eventbridge/2015-10-07/docs-2.json index 0e36eb807b6..bf59bd81201 100644 --- a/models/apis/eventbridge/2015-10-07/docs-2.json +++ b/models/apis/eventbridge/2015-10-07/docs-2.json @@ -27,7 +27,7 @@ "PutPartnerEvents": "
This is used by SaaS partners to write events to a customer's partner event bus. AWS customers do not use this operation.
", "PutPermission": "Running PutPermission
permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.
For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.
To enable multiple AWS accounts to put events to your event bus, run PutPermission
once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission
once specifying Principal
as \"*\" and specifying the AWS organization ID in Condition
, to grant permissions to all accounts in that organization.
If you grant permissions using an organization, then accounts in that organization must specify a RoleArn
with proper permissions when they use PutTarget
to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
The permission policy on the default event bus cannot exceed 10 KB in size.
", "PutRule": "Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.
A single rule watches for events from a single event bus. Events generated by AWS services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.
If you are updating an existing rule, the rule is replaced with what you specify in this PutRule
command. If you omit arguments in PutRule
, the old values for those arguments are not kept. Instead, they are replaced with null values.
When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.
A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.
When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule
operation and assign tags, you must have both the events:PutRule
and events:TagResource
permissions.
If you are updating an existing rule, any tags you specify in the PutRule
operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.
Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.
To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.
An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.
", - "PutTargets": "Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered.
You can configure the following as targets for Events:
EC2 instances
SSM Run Command
SSM Automation
AWS Lambda functions
Data streams in Amazon Kinesis Data Streams
Data delivery streams in Amazon Kinesis Data Firehose
Amazon ECS tasks
AWS Step Functions state machines
AWS Batch jobs
AWS CodeBuild projects
Pipelines in AWS CodePipeline
Amazon Inspector assessment templates
Amazon SNS topics
Amazon SQS queues, including FIFO queues
The default event bus of another AWS account
Amazon API Gateway REST APIs
Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call
, EC2 RebootInstances API call
, EC2 StopInstances API call
, and EC2 TerminateInstances API call
.
For some target types, PutTargets
provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters
argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters
field.
To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN
argument in PutTargets
. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.
If another AWS account is in the same region and has granted you permission (using PutPermission
), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn
value when you run PutTargets
. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.
Input
, InputPath
, and InputTransformer
are not available with PutTarget
if the target is an event bus of a different AWS account.
If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn
with proper permissions in the Target
structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
For more information about enabling cross-account events, see PutPermission.
Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:
If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).
If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.
If InputPath is specified in the form of JSONPath (for example, $.detail
), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).
If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.
When you specify InputPath
or InputTransformer
, you must use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered.
You can configure the following as targets for Events:
EC2 instances
SSM Run Command
SSM Automation
AWS Lambda functions
Data streams in Amazon Kinesis Data Streams
Data delivery streams in Amazon Kinesis Data Firehose
Amazon ECS tasks
AWS Step Functions state machines
AWS Batch jobs
AWS CodeBuild projects
Pipelines in AWS CodePipeline
Amazon Inspector assessment templates
Amazon SNS topics
Amazon SQS queues, including FIFO queues
The default event bus of another AWS account
Amazon API Gateway REST APIs
Redshift Clusters to invoke Data API ExecuteStatement on
Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call
, EC2 RebootInstances API call
, EC2 StopInstances API call
, and EC2 TerminateInstances API call
.
For some target types, PutTargets
provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters
argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters
field.
To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN
argument in PutTargets
. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.
If another AWS account is in the same region and has granted you permission (using PutPermission
), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn
value when you run PutTargets
. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.
Input
, InputPath
, and InputTransformer
are not available with PutTarget
if the target is an event bus of a different AWS account.
If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn
with proper permissions in the Target
structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
For more information about enabling cross-account events, see PutPermission.
Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:
If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).
If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.
If InputPath is specified in the form of JSONPath (for example, $.detail
), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).
If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.
When you specify InputPath
or InputTransformer
, you must use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId
value that you associated with the account when you granted it permission with PutPermission
. You can find the StatementId
by using DescribeEventBus.
Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer be invoked.
When you remove a target, when the associated rule triggers, removed targets might continue to be invoked. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules and event buses can be tagged.
Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.
You can use the TagResource
action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource.
", @@ -97,6 +97,7 @@ "base": null, "refs": { "DeleteRuleRequest$Force": "If this is a managed rule, created by an AWS service on your behalf, you must specify Force
as True
to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule
or ListRules
and checking the ManagedBy
field of the response.
Indicates whether to send an event back to EventBridge after the SQL statement runs.
", "RemoveTargetsRequest$Force": "If this is a managed rule, created by an AWS service on your behalf, you must specify Force
as True
to remove targets. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule
or ListRules
and checking the ManagedBy
field of the response.
Indicates whether the event matches the event pattern.
" } @@ -132,6 +133,18 @@ "refs": { } }, + "Database": { + "base": null, + "refs": { + "RedshiftDataParameters$Database": "The name of the database. Required when authenticating using temporary credentials.
" + } + }, + "DbUser": { + "base": null, + "refs": { + "RedshiftDataParameters$DbUser": "The database user name. Required when authenticating using temporary credentials.
" + } + }, "DeactivateEventSourceRequest": { "base": null, "refs": { @@ -744,6 +757,18 @@ "QueryStringParametersMap$value": null } }, + "RedshiftDataParameters": { + "base": "These are custom parameters to be used when the target is a Redshift cluster to invoke the Redshift Data API ExecuteStatement based on EventBridge events.
", + "refs": { + "Target$RedshiftDataParameters": "Contains the Redshift Data API parameters to use when the target is a Redshift cluster.
If you specify a Redshift Cluster as a Target, you can use this to specify parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge events.
" + } + }, + "RedshiftSecretManagerArn": { + "base": null, + "refs": { + "RedshiftDataParameters$SecretManagerArn": "The name or ARN of the secret that enables access to the database. Required when authenticating using AWS Secrets Manager.
" + } + }, "RemovePermissionRequest": { "base": null, "refs": { @@ -893,6 +918,12 @@ "Rule$ScheduleExpression": "The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".
" } }, + "Sql": { + "base": null, + "refs": { + "RedshiftDataParameters$Sql": "The SQL statement text to run.
" + } + }, "SqsParameters": { "base": "This structure includes the custom parameter to be used when the target is an SQS FIFO queue.
", "refs": { @@ -906,6 +937,12 @@ "RemovePermissionRequest$StatementId": "The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.
" } }, + "StatementName": { + "base": null, + "refs": { + "RedshiftDataParameters$StatementName": "The name of the SQL statement. You can name the SQL statement when you create it to identify the query.
" + } + }, "String": { "base": null, "refs": { diff --git a/models/apis/events/2015-10-07/api-2.json b/models/apis/events/2015-10-07/api-2.json index cf8ba31f70c..6e55a18c65f 100644 --- a/models/apis/events/2015-10-07/api-2.json +++ b/models/apis/events/2015-10-07/api-2.json @@ -563,6 +563,18 @@ "EventSourceArn":{"shape":"String"} } }, + "Database":{ + "type":"string", + "max":64, + "min":1, + "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, + "DbUser":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, "DeactivateEventSourceRequest":{ "type":"structure", "required":["Name"], @@ -1218,6 +1230,27 @@ "max":512, "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+" }, + "RedshiftDataParameters":{ + "type":"structure", + "required":[ + "Database", + "Sql" + ], + "members":{ + "SecretManagerArn":{"shape":"RedshiftSecretManagerArn"}, + "Database":{"shape":"Database"}, + "DbUser":{"shape":"DbUser"}, + "Sql":{"shape":"Sql"}, + "StatementName":{"shape":"StatementName"}, + "WithEvent":{"shape":"Boolean"} + } + }, + "RedshiftSecretManagerArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(^arn:aws([a-z]|\\-)*:secretsmanager:[a-z0-9-.]+:.*)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + }, "RemovePermissionRequest":{ "type":"structure", "required":["StatementId"], @@ -1364,6 +1397,11 @@ "type":"string", "max":256 }, + "Sql":{ + "type":"string", + "max":100000, + "min":1 + }, "SqsParameters":{ "type":"structure", "members":{ @@ -1376,6 +1414,11 @@ "min":1, "pattern":"[a-zA-Z0-9-_]+" }, + "StatementName":{ + "type":"string", + "max":500, + "min":1 + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -1444,7 +1487,8 @@ "EcsParameters":{"shape":"EcsParameters"}, "BatchParameters":{"shape":"BatchParameters"}, "SqsParameters":{"shape":"SqsParameters"}, - "HttpParameters":{"shape":"HttpParameters"} + "HttpParameters":{"shape":"HttpParameters"}, + 
"RedshiftDataParameters":{"shape":"RedshiftDataParameters"} } }, "TargetArn":{ diff --git a/models/apis/events/2015-10-07/docs-2.json b/models/apis/events/2015-10-07/docs-2.json index 0e36eb807b6..bf59bd81201 100644 --- a/models/apis/events/2015-10-07/docs-2.json +++ b/models/apis/events/2015-10-07/docs-2.json @@ -27,7 +27,7 @@ "PutPartnerEvents": "This is used by SaaS partners to write events to a customer's partner event bus. AWS customers do not use this operation.
", "PutPermission": "Running PutPermission
permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.
For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.
To enable multiple AWS accounts to put events to your event bus, run PutPermission
once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission
once specifying Principal
as \"*\" and specifying the AWS organization ID in Condition
, to grant permissions to all accounts in that organization.
If you grant permissions using an organization, then accounts in that organization must specify a RoleArn
with proper permissions when they use PutTarget
to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
The permission policy on the default event bus cannot exceed 10 KB in size.
", "PutRule": "Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule.
A single rule watches for events from a single event bus. Events generated by AWS services go to your account's default event bus. Events generated by SaaS partner services or applications go to the matching partner event bus. If you have custom applications or services, you can specify whether their events go to your default event bus or a custom event bus that you have created. For more information, see CreateEventBus.
If you are updating an existing rule, the rule is replaced with what you specify in this PutRule
command. If you omit arguments in PutRule
, the old values for those arguments are not kept. Instead, they are replaced with null values.
When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Allow a short period of time for changes to take effect.
A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.
When you initially create a rule, you can optionally assign one or more tags to the rule. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only rules with certain tag values. To use the PutRule
operation and assign tags, you must have both the events:PutRule
and events:TagResource
permissions.
If you are updating an existing rule, any tags you specify in the PutRule
operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.
Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
In EventBridge, it is possible to create rules that lead to infinite loops, where a rule is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket, and trigger software to change them to the desired state. If the rule is not written carefully, the subsequent change to the ACLs fires the rule again, creating an infinite loop.
To prevent this, write the rules so that the triggered actions do not re-fire the same rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead of after any change.
An infinite loop can quickly cause higher than expected charges. We recommend that you use budgeting, which alerts you when charges exceed your specified limit. For more information, see Managing Your Costs with Budgets.
", - "PutTargets": "Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered.
You can configure the following as targets for Events:
EC2 instances
SSM Run Command
SSM Automation
AWS Lambda functions
Data streams in Amazon Kinesis Data Streams
Data delivery streams in Amazon Kinesis Data Firehose
Amazon ECS tasks
AWS Step Functions state machines
AWS Batch jobs
AWS CodeBuild projects
Pipelines in AWS CodePipeline
Amazon Inspector assessment templates
Amazon SNS topics
Amazon SQS queues, including FIFO queues
The default event bus of another AWS account
Amazon API Gateway REST APIs
Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call
, EC2 RebootInstances API call
, EC2 StopInstances API call
, and EC2 TerminateInstances API call
.
For some target types, PutTargets
provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters
argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters
field.
To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN
argument in PutTargets
. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.
If another AWS account is in the same region and has granted you permission (using PutPermission
), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn
value when you run PutTargets
. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.
Input
, InputPath
, and InputTransformer
are not available with PutTargets
if the target is an event bus of a different AWS account.
If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn
with proper permissions in the Target
structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
For more information about enabling cross-account events, see PutPermission.
Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:
If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).
If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.
If InputPath is specified in the form of JSONPath (for example, $.detail
), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).
If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.
When you specify InputPath
or InputTransformer
, you must use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered.
You can configure the following as targets for Events:
EC2 instances
SSM Run Command
SSM Automation
AWS Lambda functions
Data streams in Amazon Kinesis Data Streams
Data delivery streams in Amazon Kinesis Data Firehose
Amazon ECS tasks
AWS Step Functions state machines
AWS Batch jobs
AWS CodeBuild projects
Pipelines in AWS CodePipeline
Amazon Inspector assessment templates
Amazon SNS topics
Amazon SQS queues, including FIFO queues
The default event bus of another AWS account
Amazon API Gateway REST APIs
Redshift Clusters to invoke Data API ExecuteStatement on
Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call
, EC2 RebootInstances API call
, EC2 StopInstances API call
, and EC2 TerminateInstances API call
.
For some target types, PutTargets
provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters
argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters
field.
To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN
argument in PutTargets
. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.
If another AWS account is in the same region and has granted you permission (using PutPermission
), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn
value when you run PutTargets
. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.
Input
, InputPath
, and InputTransformer
are not available with PutTargets
if the target is an event bus of a different AWS account.
If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn
with proper permissions in the Target
structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.
For more information about enabling cross-account events, see PutPermission.
Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:
If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).
If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.
If InputPath is specified in the form of JSONPath (for example, $.detail
), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).
If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.
When you specify InputPath
or InputTransformer
, you must use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId
value that you associated with the account when you granted it permission with PutPermission
. You can find the StatementId
by using DescribeEventBus.
Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.
After you remove a target, the removed target might continue to be invoked when the associated rule triggers. Allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount
is non-zero in the response and each entry in FailedEntries
provides the ID of the failed target and the error code.
Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In EventBridge, rules and event buses can be tagged.
Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.
You can use the TagResource
action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource.
", @@ -97,6 +97,7 @@ "base": null, "refs": { "DeleteRuleRequest$Force": "If this is a managed rule, created by an AWS service on your behalf, you must specify Force
as True
to delete the rule. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule
or ListRules
and checking the ManagedBy
field of the response.
Indicates whether to send an event back to EventBridge after the SQL statement runs.
", "RemoveTargetsRequest$Force": "If this is a managed rule, created by an AWS service on your behalf, you must specify Force
as True
to remove targets. This parameter is ignored for rules that are not managed rules. You can check whether a rule is a managed rule by using DescribeRule
or ListRules
and checking the ManagedBy
field of the response.
Indicates whether the event matches the event pattern.
" } @@ -132,6 +133,18 @@ "refs": { } }, + "Database": { + "base": null, + "refs": { + "RedshiftDataParameters$Database": "The name of the database. Required when authenticating using temporary credentials.
" + } + }, + "DbUser": { + "base": null, + "refs": { + "RedshiftDataParameters$DbUser": "The database user name. Required when authenticating using temporary credentials.
" + } + }, "DeactivateEventSourceRequest": { "base": null, "refs": { @@ -744,6 +757,18 @@ "QueryStringParametersMap$value": null } }, + "RedshiftDataParameters": { + "base": "These are custom parameters to be used when the target is a Redshift cluster to invoke the Redshift Data API ExecuteStatement based on EventBridge events.
", + "refs": { + "Target$RedshiftDataParameters": "Contains the Redshift Data API parameters to use when the target is a Redshift cluster.
If you specify a Redshift Cluster as a Target, you can use this to specify parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge events.
" + } + }, + "RedshiftSecretManagerArn": { + "base": null, + "refs": { + "RedshiftDataParameters$SecretManagerArn": "The name or ARN of the secret that enables access to the database. Required when authenticating using AWS Secrets Manager.
" + } + }, "RemovePermissionRequest": { "base": null, "refs": { @@ -893,6 +918,12 @@ "Rule$ScheduleExpression": "The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".
" } }, + "Sql": { + "base": null, + "refs": { + "RedshiftDataParameters$Sql": "The SQL statement text to run.
" + } + }, "SqsParameters": { "base": "This structure includes the custom parameter to be used when the target is an SQS FIFO queue.
", "refs": { @@ -906,6 +937,12 @@ "RemovePermissionRequest$StatementId": "The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.
" } }, + "StatementName": { + "base": null, + "refs": { + "RedshiftDataParameters$StatementName": "The name of the SQL statement. You can name the SQL statement when you create it to identify the query.
" + } + }, "String": { "base": null, "refs": { diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index a58a24a3aa5..c21d61f8c27 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -188,6 +188,22 @@ {"shape":"OperationTimeoutException"} ] }, + "BatchUpdatePartition":{ + "name":"BatchUpdatePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchUpdatePartitionRequest"}, + "output":{"shape":"BatchUpdatePartitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"}, + {"shape":"GlueEncryptionException"} + ] + }, "CancelMLTaskRun":{ "name":"CancelMLTaskRun", "http":{ @@ -2352,6 +2368,54 @@ "type":"list", "member":{"shape":"BatchStopJobRunSuccessfulSubmission"} }, + "BatchUpdatePartitionFailureEntry":{ + "type":"structure", + "members":{ + "PartitionValueList":{"shape":"BoundedPartitionValueList"}, + "ErrorDetail":{"shape":"ErrorDetail"} + } + }, + "BatchUpdatePartitionFailureList":{ + "type":"list", + "member":{"shape":"BatchUpdatePartitionFailureEntry"} + }, + "BatchUpdatePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "Entries" + ], + "members":{ + "CatalogId":{"shape":"CatalogIdString"}, + "DatabaseName":{"shape":"NameString"}, + "TableName":{"shape":"NameString"}, + "Entries":{"shape":"BatchUpdatePartitionRequestEntryList"} + } + }, + "BatchUpdatePartitionRequestEntry":{ + "type":"structure", + "required":[ + "PartitionValueList", + "PartitionInput" + ], + "members":{ + "PartitionValueList":{"shape":"BoundedPartitionValueList"}, + "PartitionInput":{"shape":"PartitionInput"} + } + }, + "BatchUpdatePartitionRequestEntryList":{ + "type":"list", + "member":{"shape":"BatchUpdatePartitionRequestEntry"}, + "max":100, + "min":1 + }, + "BatchUpdatePartitionResponse":{ + 
"type":"structure", + "members":{ + "Errors":{"shape":"BatchUpdatePartitionFailureList"} + } + }, "BinaryColumnStatisticsData":{ "type":"structure", "required":[ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 8a2c6d26d09..363173dbbc6 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -14,6 +14,7 @@ "BatchGetTriggers": "Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers
operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.
Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows
operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.
Stops one or more job runs for a specified job definition.
", + "BatchUpdatePartition": "Updates one or more partitions in a batch operation.
", "CancelMLTaskRun": "Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun
with a task run's parent transform's TransformID
and the task run's TaskRunId
.
Creates a classifier in the user's account. This can be a GrokClassifier
, an XMLClassifier
, a JsonClassifier
, or a CsvClassifier
, depending on which field of the request is present.
Creates a connection definition in the Data Catalog.
", @@ -343,6 +344,40 @@ "BatchStopJobRunResponse$SuccessfulSubmissions": "A list of the JobRuns that were successfully submitted for stopping.
" } }, + "BatchUpdatePartitionFailureEntry": { + "base": "Contains information about a batch update partition error.
", + "refs": { + "BatchUpdatePartitionFailureList$member": null + } + }, + "BatchUpdatePartitionFailureList": { + "base": null, + "refs": { + "BatchUpdatePartitionResponse$Errors": "The errors encountered when trying to update the requested partitions. A list of BatchUpdatePartitionFailureEntry
objects.
A structure that contains the values and structure used to update a partition.
", + "refs": { + "BatchUpdatePartitionRequestEntryList$member": null + } + }, + "BatchUpdatePartitionRequestEntryList": { + "base": null, + "refs": { + "BatchUpdatePartitionRequest$Entries": "A list of up to 100 BatchUpdatePartitionRequestEntry
objects to update.
Defines a binary column statistics data.
", "refs": { @@ -392,6 +427,8 @@ "BoundedPartitionValueList": { "base": null, "refs": { + "BatchUpdatePartitionFailureEntry$PartitionValueList": "A list of values defining the partitions.
", + "BatchUpdatePartitionRequestEntry$PartitionValueList": "A list of values defining the partitions.
", "UpdatePartitionRequest$PartitionValueList": "List of partition key values that define the partition to update.
" } }, @@ -435,6 +472,7 @@ "BatchDeleteTableRequest$CatalogId": "The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
", "BatchDeleteTableVersionRequest$CatalogId": "The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
", "BatchGetPartitionRequest$CatalogId": "The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.
", + "BatchUpdatePartitionRequest$CatalogId": "The ID of the catalog in which the partition is to be updated. Currently, this should be the AWS account ID.
", "CreateConnectionRequest$CatalogId": "The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.
", "CreateDatabaseRequest$CatalogId": "The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.
", "CreatePartitionRequest$CatalogId": "The AWS account ID of the catalog in which the partition is to be created.
", @@ -787,7 +825,7 @@ "ConnectionProperties": { "base": null, "refs": { - "Connection$ConnectionProperties": "These key-value pairs define parameters for the connection:
HOST
- The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT
- The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME
- The name under which to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
PASSWORD
- A password, if one is used, for the user name.
ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
JDBC_ENGINE
- The name of the JDBC engine to use.
JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
CONFIG_FILES
- (Reserved for future use.)
INSTANCE_ID
- The instance ID to use.
JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip AWS Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
These key-value pairs define parameters for the connection:
HOST
- The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT
- The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME
- The name under which to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
PASSWORD
- A password, if one is used, for the user name.
ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
JDBC_ENGINE
- The name of the JDBC engine to use.
JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
CONFIG_FILES
- (Reserved for future use.)
INSTANCE_ID
- The instance ID to use.
JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip AWS Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
KAFKA_SSL_ENABLED
- Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
KAFKA_CUSTOM_CERT
- The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
KAFKA_SKIP_CUSTOM_CERT_VALIDATION
- Whether to skip the validation of the CA cert file or not. AWS Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
These key-value pairs define parameters for the connection.
" } }, @@ -801,7 +839,7 @@ "base": null, "refs": { "Connection$ConnectionType": "The type of the connection. Currently, SFTP is not supported.
", - "ConnectionInput$ConnectionType": "The type of the connection. Currently, these types are supported:
JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
KAFKA
- Designates a connection to an Apache Kafka streaming platform.
MONGODB
- Designates a connection to a MongoDB document database.
SFTP is not supported.
", + "ConnectionInput$ConnectionType": "The type of the connection. Currently, these types are supported:
JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
KAFKA
- Designates a connection to an Apache Kafka streaming platform.
MONGODB
- Designates a connection to a MongoDB document database.
NETWORK
- Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
SFTP is not supported.
", "GetConnectionsFilter$ConnectionType": "The type of connections to return. Currently, SFTP is not supported.
" } }, @@ -1542,6 +1580,7 @@ "base": "Contains details about an error.
", "refs": { "BatchStopJobRunError$ErrorDetail": "Specifies details about the error that was encountered.
", + "BatchUpdatePartitionFailureEntry$ErrorDetail": "The details about the batch update partition error.
", "ColumnError$Error": "The error message occurred during operation.
", "ColumnStatisticsError$Error": "The error message occurred during operation.
", "ErrorByName$value": null, @@ -2854,6 +2893,8 @@ "BatchStopJobRunError$JobName": "The name of the job definition that is used in the job run in question.
", "BatchStopJobRunRequest$JobName": "The name of the job definition for which to stop job runs.
", "BatchStopJobRunSuccessfulSubmission$JobName": "The name of the job definition used in the job run that was stopped.
", + "BatchUpdatePartitionRequest$DatabaseName": "The name of the metadata database in which the partition is to be updated.
", + "BatchUpdatePartitionRequest$TableName": "The name of the metadata table in which the partition is to be updated.
", "CatalogEntry$DatabaseName": "The database in which the table metadata resides.
", "CatalogEntry$TableName": "The name of the table in question.
", "CatalogImportStatus$ImportedBy": "The name of the person who initiated the migration.
", @@ -3359,6 +3400,7 @@ "PartitionInput": { "base": "The structure used to create and update a partition.
", "refs": { + "BatchUpdatePartitionRequestEntry$PartitionInput": "The structure used to update a partition.
", "CreatePartitionRequest$PartitionInput": "A PartitionInput
structure defining the partition to be created.
The new partition object to update the partition to.
The Values
property can't be changed. If you want to change the partition key values for a partition, delete and recreate the partition.
Associates a group (batch) of assets with an AWS IoT SiteWise Monitor project.
", "BatchDisassociateProjectAssets": "Disassociates a group (batch) of assets from an AWS IoT SiteWise Monitor project.
", "BatchPutAssetPropertyValue": "Sends a list of asset property values to AWS IoT SiteWise. Each value is a timestamp-quality-value (TQV) data point. For more information, see Ingesting data using the API in the AWS IoT SiteWise User Guide.
To identify an asset property, you must specify one of the following:
The assetId
and propertyId
of an asset property.
A propertyAlias
, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature
). To define an asset property's alias, see UpdateAssetProperty.
With respect to Unix epoch time, AWS IoT SiteWise accepts only TQVs that have a timestamp of no more than 15 minutes in the past and no more than 5 minutes in the future. AWS IoT SiteWise rejects timestamps outside of the inclusive range of [-15, +5] minutes and returns a TimestampOutOfRangeException
error.
For each asset property, AWS IoT SiteWise overwrites TQVs with duplicate timestamps unless the newer TQV has a different quality. For example, if you store a TQV {T1, GOOD, V1}
, then storing {T1, GOOD, V2}
replaces the existing TQV.
AWS IoT SiteWise authorizes access to each BatchPutAssetPropertyValue
entry individually. For more information, see BatchPutAssetPropertyValue authorization in the AWS IoT SiteWise User Guide.
Creates an access policy that grants the specified AWS Single Sign-On user or group access to the specified AWS IoT SiteWise Monitor portal or project resource.
", + "CreateAccessPolicy": "Creates an access policy that grants the specified identity (AWS SSO user, AWS SSO group, or IAM user) access to the specified AWS IoT SiteWise Monitor portal or project resource.
", "CreateAsset": "Creates an asset from an existing asset model. For more information, see Creating assets in the AWS IoT SiteWise User Guide.
", "CreateAssetModel": "Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the AWS IoT SiteWise User Guide.
", "CreateDashboard": "Creates a dashboard in an AWS IoT SiteWise Monitor project.
", "CreateGateway": "Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.
", - "CreatePortal": "Creates a portal, which can contain projects and dashboards. Before you can create a portal, you must enable AWS Single Sign-On. AWS IoT SiteWise Monitor uses AWS SSO to manage user permissions. For more information, see Enabling AWS SSO in the AWS IoT SiteWise User Guide.
Before you can sign in to a new portal, you must add at least one AWS SSO user or group to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.
Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and manage user permissions.
Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.
Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains a session token that lets the IAM user access the portal.
", "CreateProject": "Creates a project in the specified portal.
", - "DeleteAccessPolicy": "Deletes an access policy that grants the specified AWS Single Sign-On identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.
", + "DeleteAccessPolicy": "Deletes an access policy that grants the specified identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.
", "DeleteAsset": "Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.
You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.
Deletes an asset model. This action can't be undone. You must delete all assets created from an asset model before you can delete the model. Also, you can't delete an asset model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.
", "DeleteDashboard": "Deletes a dashboard from AWS IoT SiteWise Monitor.
", - "DeleteGateway": "Deletes a gateway from AWS IoT SiteWise. When you delete a gateway, some of the gateway's files remain in your gateway's file system. For more information, see Data retention in the AWS IoT SiteWise User Guide.
", + "DeleteGateway": "Deletes a gateway from AWS IoT SiteWise. When you delete a gateway, some of the gateway's files remain in your gateway's file system.
", "DeletePortal": "Deletes a portal from AWS IoT SiteWise Monitor.
", "DeleteProject": "Deletes a project from AWS IoT SiteWise Monitor.
", - "DescribeAccessPolicy": "Describes an access policy, which specifies an AWS SSO user or group's access to an AWS IoT SiteWise Monitor portal or project.
", + "DescribeAccessPolicy": "Describes an access policy, which specifies an identity's access to an AWS IoT SiteWise Monitor portal or project.
", "DescribeAsset": "Retrieves information about an asset.
", "DescribeAssetModel": "Retrieves information about an asset model.
", "DescribeAssetProperty": "Retrieves information about an asset property.
When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.
This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.
", @@ -34,7 +35,7 @@ "GetAssetPropertyAggregates": "Gets aggregated values for an asset property. For more information, see Querying aggregates in the AWS IoT SiteWise User Guide.
To identify an asset property, you must specify one of the following:
The assetId
and propertyId
of an asset property.
A propertyAlias
, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature
). To define an asset property's alias, see UpdateAssetProperty.
Gets an asset property's current value. For more information, see Querying current values in the AWS IoT SiteWise User Guide.
To identify an asset property, you must specify one of the following:
The assetId
and propertyId
of an asset property.
A propertyAlias
, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature
). To define an asset property's alias, see UpdateAssetProperty.
Gets the history of an asset property's values. For more information, see Querying historical values in the AWS IoT SiteWise User Guide.
To identify an asset property, you must specify one of the following:
The assetId
and propertyId
of an asset property.
A propertyAlias
, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature
). To define an asset property's alias, see UpdateAssetProperty.
Retrieves a paginated list of access policies for an AWS SSO identity (a user or group) or an AWS IoT SiteWise Monitor resource (a portal or project).
", + "ListAccessPolicies": "Retrieves a paginated list of access policies for an identity (an AWS SSO user, an AWS SSO group, or an IAM user) or an AWS IoT SiteWise Monitor resource (a portal or project).
", "ListAssetModels": "Retrieves a paginated list of summaries of all asset models.
", "ListAssets": "Retrieves a paginated list of asset summaries.
You can use this operation to do the following:
List assets based on a specific asset model.
List top-level assets.
You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.
", "ListAssociatedAssets": "Retrieves a paginated list of associated assets.
You can use this operation to do the following:
List child assets associated to a parent asset by a hierarchy that you specify.
List an asset's parent asset.
Sets logging options for AWS IoT SiteWise.
", "TagResource": "Adds tags to an AWS IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.
", "UntagResource": "Removes a tag from an AWS IoT SiteWise resource.
", - "UpdateAccessPolicy": "Updates an existing access policy that specifies an AWS SSO user or group's access to an AWS IoT SiteWise Monitor portal or project resource.
", + "UpdateAccessPolicy": "Updates an existing access policy that specifies an identity's access to an AWS IoT SiteWise Monitor portal or project resource.
", "UpdateAsset": "Updates an asset's name. For more information, see Updating assets and models in the AWS IoT SiteWise User Guide.
", - "UpdateAssetModel": "Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the AWS IoT SiteWise User Guide.
This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.
If you remove a property from an asset model or update a property's formula expression, AWS IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, AWS IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.
Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the AWS IoT SiteWise User Guide.
This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.
If you remove a property from an asset model, AWS IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, AWS IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.
Updates an asset property's alias and notification state.
This operation overwrites the property's existing alias and notification state. To keep your existing property's alias or notification state, you must include the existing values in the UpdateAssetProperty request. For more information, see DescribeAssetProperty.
Updates an AWS IoT SiteWise Monitor dashboard.
", "UpdateGateway": "Updates a gateway's name.
", @@ -81,6 +82,8 @@ "DescribePortalResponse$roleArn": "The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
", "DescribeProjectResponse$projectArn": "The ARN of the project, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:project/${ProjectId}
The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the AWS IoT Greengrass API Reference.
", + "IAMUserIdentity$arn": "The ARN of the IAM user. IAM users must have the iotsitewise:CreatePresignedPortalUrl
permission to sign in to the portal. For more information, see IAM ARNs in the IAM User Guide.
If you delete the IAM user, access policies that contain this identity include an empty arn
. You can delete the access policy for the IAM user that no longer exists.
The ARN of the IAM user. For more information, see IAM ARNs in the IAM User Guide. This parameter is required if you specify IAM
for identityType
.
The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
", "UpdatePortalRequest$roleArn": "The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
" } @@ -92,7 +95,7 @@ } }, "AccessPolicySummary": { - "base": "Contains an access policy that defines an AWS SSO identity's access to an AWS IoT SiteWise Monitor resource.
", + "base": "Contains an access policy that defines an identity's access to an AWS IoT SiteWise Monitor resource.
", "refs": { "AccessPolicySummaries$member": null } @@ -357,6 +360,13 @@ "PropertyType$attribute": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an IIoT wind turbine.
" } }, + "AuthMode": { + "base": null, + "refs": { + "CreatePortalRequest$portalAuthMode": "The service to use to authenticate users to the portal. Choose from the following options:
SSO
– The portal uses AWS Single Sign-On to authenticate users and manage user permissions. Before you can create a portal that uses AWS SSO, you must enable AWS SSO. For more information, see Enabling AWS SSO in the AWS IoT SiteWise User Guide. This option is only available in AWS Regions other than the China Regions.
IAM
– The portal uses AWS Identity and Access Management (IAM) to authenticate users and manage user permissions. IAM users must have the iotsitewise:CreatePresignedPortalUrl
permission to sign in to the portal. This option is only available in the China Regions.
You can't change this value after you create a portal.
Default: SSO
The service to use to authenticate users to the portal.
" + } + }, "BatchAssociateProjectAssetsErrors": { "base": null, "refs": { @@ -547,6 +557,16 @@ "refs": { } }, + "CreatePresignedPortalUrlRequest": { + "base": null, + "refs": { + } + }, + "CreatePresignedPortalUrlResponse": { + "base": null, + "refs": { + } + }, "CreateProjectRequest": { "base": null, "refs": { @@ -914,7 +934,13 @@ "GroupIdentity": { "base": "Contains information for a group identity in an access policy.
", "refs": { - "Identity$group": "A group identity.
" + "Identity$group": "An AWS SSO group identity.
" + } + }, + "IAMUserIdentity": { + "base": "Contains information about an AWS Identity and Access Management (IAM) user.
", + "refs": { + "Identity$iamUser": "An IAM user identity.
" } }, "ID": { @@ -947,6 +973,7 @@ "CreateDashboardResponse$dashboardId": "The ID of the dashboard.
", "CreateGatewayResponse$gatewayId": "The ID of the gateway device. You can use this ID when you call other AWS IoT SiteWise APIs.
", "CreatePortalResponse$portalId": "The ID of the created portal.
", + "CreatePresignedPortalUrlRequest$portalId": "The ID of the portal to access.
", "CreateProjectRequest$portalId": "The ID of the portal in which to create the project.
", "CreateProjectResponse$projectId": "The ID of the project.
", "DashboardSummary$id": "The ID of the dashboard.
", @@ -1027,26 +1054,26 @@ } }, "Identity": { - "base": "Contains an AWS SSO identity ID for a user or group.
Currently, you can't use AWS APIs to retrieve AWS SSO identity IDs. You can find the AWS SSO identity IDs in the URL of user and group pages in the AWS SSO console.
Contains an identity that can access an AWS IoT SiteWise Monitor resource.
Currently, you can't use AWS APIs to retrieve AWS SSO identity IDs. You can find the AWS SSO identity IDs in the URL of user and group pages in the AWS SSO console.
The AWS SSO identity (a user or group).
", - "CreateAccessPolicyRequest$accessPolicyIdentity": "The identity for this access policy. Choose either a user
or a group
but not both.
The AWS SSO identity (user or group) to which this access policy applies.
", - "UpdateAccessPolicyRequest$accessPolicyIdentity": "The identity for this access policy. Choose either a user
or a group
but not both.
The identity (an AWS SSO user, an AWS SSO group, or an IAM user).
", + "CreateAccessPolicyRequest$accessPolicyIdentity": "The identity for this access policy. Choose an AWS SSO user, an AWS SSO group, or an IAM user.
", + "DescribeAccessPolicyResponse$accessPolicyIdentity": "The identity (AWS SSO user, AWS SSO group, or IAM user) to which this access policy applies.
", + "UpdateAccessPolicyRequest$accessPolicyIdentity": "The identity for this access policy. Choose an AWS SSO user, an AWS SSO group, or an IAM user.
" } }, "IdentityId": { "base": null, "refs": { "GroupIdentity$id": "The AWS SSO ID of the group.
", - "ListAccessPoliciesRequest$identityId": "The ID of the identity. This parameter is required if you specify identityType
.
The ID of the identity. This parameter is required if you specify USER
or GROUP
for identityType
.
The AWS SSO ID of the user.
" } }, "IdentityType": { "base": null, "refs": { - "ListAccessPoliciesRequest$identityType": "The type of identity (user or group). This parameter is required if you specify identityId
.
The type of identity (AWS SSO user, AWS SSO group, or IAM user). This parameter is required if you specify identityId
.
The hierarchy name provided in the CreateAssetModel or UpdateAssetModel API.
", - "AssetModelHierarchy$name": "The name of the asset model hierarchy that you specify by using the CreateAssetModel or UpdateAssetModel API.
", - "AssetModelHierarchyDefinition$name": "The name of the asset model hierarchy definition (as specified in CreateAssetModel or UpdateAssetModel).
", + "AssetHierarchy$name": "The hierarchy name provided in the CreateAssetModel or UpdateAssetModel API operation.
", + "AssetModelHierarchy$name": "The name of the asset model hierarchy that you specify by using the CreateAssetModel or UpdateAssetModel API operation.
", + "AssetModelHierarchyDefinition$name": "The name of the asset model hierarchy definition (as specified in the CreateAssetModel or UpdateAssetModel API operation).
", "AssetModelProperty$name": "The name of the asset model property.
", "AssetModelPropertyDefinition$name": "The name of the property definition.
", "AssetModelSummary$name": "The name of the asset model.
", @@ -1362,7 +1389,7 @@ "PortalClientId": { "base": null, "refs": { - "DescribePortalResponse$portalClientId": "The AWS SSO application generated client ID (used with AWS SSO APIs).
" + "DescribePortalResponse$portalClientId": "The AWS SSO application generated client ID (used with AWS SSO APIs). AWS IoT SiteWise includes portalClientId
for only portals that use AWS SSO to authenticate users.
Contains a list of value updates for an asset property in the list of asset entries consumed by the BatchPutAssetPropertyValue API.
", + "base": "Contains a list of value updates for an asset property in the list of asset entries consumed by the BatchPutAssetPropertyValue API operation.
", "refs": { "PutAssetPropertyValueEntries$member": null } @@ -1548,9 +1575,9 @@ "base": "Contains an AWS IoT SiteWise Monitor resource ID for a portal or project.
", "refs": { "AccessPolicySummary$resource": "The AWS IoT SiteWise Monitor resource (a portal or project).
", - "CreateAccessPolicyRequest$accessPolicyResource": "The AWS IoT SiteWise Monitor resource for this access policy. Choose either portal
or project
but not both.
The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project.
", "DescribeAccessPolicyResponse$accessPolicyResource": "The AWS IoT SiteWise Monitor resource (portal or project) to which this access policy provides access.
", - "UpdateAccessPolicyRequest$accessPolicyResource": "The AWS IoT SiteWise Monitor resource for this access policy. Choose either portal
or project
but not both.
The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project.
" } }, "ResourceAlreadyExistsException": { @@ -1586,7 +1613,7 @@ "SSOApplicationId": { "base": null, "refs": { - "CreatePortalResponse$ssoApplicationId": "The associated AWS SSO application Id.
" + "CreatePortalResponse$ssoApplicationId": "The associated AWS SSO application ID, if the portal uses AWS SSO.
" } }, "ServiceUnavailableException": { @@ -1594,6 +1621,12 @@ "refs": { } }, + "SessionDurationSeconds": { + "base": null, + "refs": { + "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "The duration (in seconds) for which the session at the URL is valid.
Default: 900 seconds (15 minutes)
" + } + }, "TagKey": { "base": null, "refs": { @@ -1824,16 +1857,17 @@ "Url": { "base": null, "refs": { - "CreatePortalResponse$portalStartUrl": "The public URL for the AWS IoT SiteWise Monitor portal.
", - "DescribePortalResponse$portalStartUrl": "The public root URL for the AWS IoT AWS IoT SiteWise Monitor application portal.
", + "CreatePortalResponse$portalStartUrl": "The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
", + "CreatePresignedPortalUrlResponse$presignedPortalUrl": "The pre-signed URL to the portal. The URL contains the portal ID and a session token that lets you access the portal. The URL has the following format.
https://<portal-id>.app.iotsitewise.aws/auth?token=<encrypted-token>
The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
", "ImageLocation$url": "The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image
", - "PortalSummary$startUrl": "The public root URL for the AWS IoT AWS IoT SiteWise Monitor application portal.
" + "PortalSummary$startUrl": "The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
" } }, "UserIdentity": { "base": "Contains information for a user identity in an access policy.
", "refs": { - "Identity$user": "A user identity.
" + "Identity$user": "An AWS SSO user identity.
" } }, "VariableName": { diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 994f38d4d17..bbd2e7e9044 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -13,7 +13,7 @@ "CopyDBClusterParameterGroup": "Copies the specified DB cluster parameter group.
This action only applies to Aurora DB clusters.
Copies a snapshot of a DB cluster.
To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot
action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:
KmsKeyId
- The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.
PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
KmsKeyId
- The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot
action that is called in the destination AWS Region, and the action contained in the pre-signed URL.
DestinationRegion
- The name of the AWS Region that the DB cluster snapshot is to be created in.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier
looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.
If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion
(or --source-region
for the AWS CLI) instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.
TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier
while that DB cluster snapshot is in \"copying\" status.
For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.
For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Copies the specified DB parameter group.
", - "CopyDBSnapshot": "Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.
You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot
action is the destination AWS Region for the DB snapshot copy.
For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.
", + "CopyDBSnapshot": "Copies the specified DB snapshot. The source DB snapshot must be in the available
or storage-optimization
state.
You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot
action is the destination AWS Region for the DB snapshot copy.
For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.
", "CopyOptionGroup": "Copies the specified option group.
", "CreateCustomAvailabilityZone": "Creates a custom Availability Zone (AZ).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere cluster.
For more information about RDS on VMware, see the RDS on VMware User Guide.
", "CreateDBCluster": "Creates a new Amazon Aurora DB cluster.
You can use the ReplicationSourceIdentifier
parameter to create the DB cluster as a read replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier
is encrypted, you must also specify the PreSignedUrl
parameter.
For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Creates a DBSnapshot. The source DBInstance must be in \"available\" state.
", "CreateDBSubnetGroup": "Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.
", "CreateEventSubscription": "Creates an RDS event notification subscription. This action requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.
You can specify the type of source (SourceType
) that you want to be notified of and provide a list of RDS sources (SourceIds
) that triggers the events. You can also provide a list of event categories (EventCategories
) for events that you want to be notified of. For example, you can specify SourceType
= db-instance
, SourceIds
= mydbinstance1
, mydbinstance2
and EventCategories
= Availability
, Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
and SourceIdentifier
= myDBInstance1
, you are notified of all the db-instance
events for the specified source. If you specify a SourceType
but do not specify a SourceIdentifier
, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIdentifier
, you are notified of events generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.
Creates an Aurora global database spread across multiple regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action only applies to Aurora DB clusters.
Creates an Aurora global database spread across multiple AWS Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action only applies to Aurora DB clusters.
Creates a new option group. You can create up to 20 option groups.
", "DeleteCustomAvailabilityZone": "Deletes a custom Availability Zone (AZ).
A custom AZ is an on-premises AZ that is integrated with a VMware vSphere cluster.
For more information about RDS on VMware, see the RDS on VMware User Guide.
", "DeleteDBCluster": "The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.
For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Returns the default engine and system parameter information for the specified database engine.
", "DescribeEventCategories": "Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Events in the Amazon RDS User Guide.
", "DescribeEventSubscriptions": "Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName
, SNSTopicARN
, CustomerID
, SourceType
, SourceID
, CreationTime
, and Status
.
If you specify a SubscriptionName
, lists the description for that subscription.
Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, and DB cluster snapshots for the past 14 days. Events specific to a particular DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, and DB cluster snapshots group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.
", + "DescribeEvents": "Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, and DB cluster snapshots for the past 14 days. Events specific to a particular DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, and DB cluster snapshots group can be obtained by providing the name as a parameter.
By default, the past hour of events are returned.
Returns information about a snapshot export to Amazon S3. This API operation supports pagination.
", "DescribeGlobalClusters": "Returns information about Aurora global database clusters. This API supports pagination.
For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Describes the available installation media for a DB engine that requires an on-premises customer provided license, such as Microsoft SQL Server.
", @@ -2057,7 +2057,7 @@ "DescribeEventCategoriesMessage$Filters": "This parameter isn't currently supported.
", "DescribeEventSubscriptionsMessage$Filters": "This parameter isn't currently supported.
", "DescribeEventsMessage$Filters": "This parameter isn't currently supported.
", - "DescribeExportTasksMessage$Filters": "Filters specify one or more snapshot exports to describe. The filters are specified as name-value pairs that define what to include in the output.
Supported filters include the following:
export-task-identifier
- An identifier for the snapshot export task.
s3-bucket
- The Amazon S3 bucket the snapshot is exported to.
source-arn
- The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3
status
- The status of the export task.
Filters specify one or more snapshot exports to describe. The filters are specified as name-value pairs that define what to include in the output. Filter names and values are case-sensitive.
Supported filters include the following:
export-task-identifier
- An identifier for the snapshot export task.
s3-bucket
- The Amazon S3 bucket the snapshot is exported to.
source-arn
- The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3
status
- The status of the export task. Must be lowercase, for example, complete
.
A filter that specifies one or more global DB clusters to describe.
Supported filters:
db-cluster-id
- Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.
A filter that specifies one or more installation media to describe. Supported filters include the following:
custom-availability-zone-id
- Accepts custom Availability Zone (AZ) identifiers. The results list includes information about only the custom AZs identified by these identifiers.
engine
- Accepts database engines. The results list includes information about only the database engines identified by these identifiers.
For more information about the valid engines for installation media, see ImportInstallationMedia.
This parameter isn't currently supported.
", @@ -2493,8 +2493,8 @@ "refs": { "CloudwatchLogsExportConfiguration$EnableLogTypes": "The list of log types to enable.
", "CloudwatchLogsExportConfiguration$DisableLogTypes": "The list of log types to disable.
", - "CreateDBClusterMessage$EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
", - "CreateDBInstanceMessage$EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.
", + "CreateDBClusterMessage$EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
Aurora MySQL
Possible values are audit
, error
, general
, and slowquery
.
Aurora PostgreSQL
Possible values are postgresql
and upgrade
.
The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.
MariaDB
Possible values are audit
, error
, general
, and slowquery
.
Microsoft SQL Server
Possible values are agent
and error
.
MySQL
Possible values are audit
, error
, general
, and slowquery
.
Oracle
Possible values are alert
, audit
, listener
, and trace
.
PostgreSQL
Possible values are postgresql
and upgrade
.
The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
", "DBCluster$EnabledCloudwatchLogsExports": "A list of log types that this DB cluster is configured to export to CloudWatch Logs.
Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon Aurora User Guide.
", "DBEngineVersion$ExportableLogTypes": "The types of logs that the database engine has available for export to CloudWatch Logs.
", @@ -2924,7 +2924,7 @@ } }, "ProcessorFeature": { - "base": "Contains the processor features of a DB instance class.
To specify the number of CPU cores, use the coreCount
feature name for the Name
parameter. To specify the number of threads per core, use the threadsPerCore
feature name for the Name
parameter.
You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:
CreateDBInstance
ModifyDBInstance
RestoreDBInstanceFromDBSnapshot
RestoreDBInstanceFromS3
RestoreDBInstanceToPointInTime
You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions
action and specifying the instance class for the DBInstanceClass
parameter.
In addition, you can use the following actions for DB instance class processor information:
DescribeDBInstances
DescribeDBSnapshots
DescribeValidDBInstanceModifications
For more information, see Configuring the Processor of the DB Instance Class in the Amazon RDS User Guide.
", + "base": "Contains the processor features of a DB instance class.
To specify the number of CPU cores, use the coreCount
feature name for the Name
parameter. To specify the number of threads per core, use the threadsPerCore
feature name for the Name
parameter.
You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:
CreateDBInstance
ModifyDBInstance
RestoreDBInstanceFromDBSnapshot
RestoreDBInstanceFromS3
RestoreDBInstanceToPointInTime
You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions
action and specifying the instance class for the DBInstanceClass
parameter.
In addition, you can use the following actions for DB instance class processor information:
DescribeDBInstances
DescribeDBSnapshots
DescribeValidDBInstanceModifications
If you call DescribeDBInstances
, ProcessorFeature
returns non-null values only if the following conditions are met:
You are accessing an Oracle DB instance.
Your Oracle DB instance class supports configuring the number of CPU cores and threads per core.
The current number of CPU cores and threads is set to a non-default value.
For more information, see Configuring the Processor of the DB Instance Class in the Amazon RDS User Guide.
", "refs": { "ProcessorFeatureList$member": null } @@ -3536,7 +3536,7 @@ "CreateEventSubscriptionMessage$SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance
. If this value isn't specified, all events are returned.
Valid values: db-instance
| db-cluster
| db-parameter-group
| db-security-group
| db-snapshot
| db-cluster-snapshot
The cluster identifier of the new global database cluster.
", "CreateGlobalClusterMessage$SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.
", - "CreateGlobalClusterMessage$Engine": "Provides the name of the database engine to be used for this DB cluster.
", + "CreateGlobalClusterMessage$Engine": "The name of the database engine to be used for this DB cluster.
", "CreateGlobalClusterMessage$EngineVersion": "The engine version of the Aurora global database.
", "CreateGlobalClusterMessage$DatabaseName": "The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
", "CreateOptionGroupMessage$OptionGroupName": "Specifies the name of the option group to be created.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens
First character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Example: myoptiongroup
Specifies the progress of the operation as a percentage.
", "DBCluster$Endpoint": "Specifies the connection endpoint for the primary instance of the DB cluster.
", "DBCluster$ReaderEndpoint": "The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load-balances connections across the Aurora Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Aurora distributes the connection requests among the Aurora Replicas in the DB cluster. This functionality can help balance your read workload across multiple Aurora Replicas in your DB cluster.
If a failover occurs, and the Aurora Replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Aurora Replicas in the cluster, you can then reconnect to the reader endpoint.
", - "DBCluster$Engine": "Provides the name of the database engine to be used for this DB cluster.
", + "DBCluster$Engine": "The name of the database engine to be used for this DB cluster.
", "DBCluster$EngineVersion": "Indicates the database engine version.
", "DBCluster$MasterUsername": "Contains the master username for the DB cluster.
", "DBCluster$PreferredBackupWindow": "Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod
.
A pagination token that can be used in a later DescribeDBClusters request.
", "DBClusterOptionGroupStatus$DBClusterOptionGroupName": "Specifies the name of the DB cluster option group.
", "DBClusterOptionGroupStatus$Status": "Specifies the status of the DB cluster option group.
", - "DBClusterParameterGroup$DBClusterParameterGroupName": "Provides the name of the DB cluster parameter group.
", - "DBClusterParameterGroup$DBParameterGroupFamily": "Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.
", + "DBClusterParameterGroup$DBClusterParameterGroupName": "The name of the DB cluster parameter group.
", + "DBClusterParameterGroup$DBParameterGroupFamily": "The name of the DB parameter group family that this DB cluster parameter group is compatible with.
", "DBClusterParameterGroup$Description": "Provides the customer-specified description for this DB cluster parameter group.
", "DBClusterParameterGroup$DBClusterParameterGroupArn": "The Amazon Resource Name (ARN) for the DB cluster parameter group.
", "DBClusterParameterGroupDetails$Marker": " An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.
", "DBInstance$DBInstanceClass": "Contains the name of the compute and memory capacity class of the DB instance.
", - "DBInstance$Engine": "Provides the name of the database engine to be used for this DB instance.
", + "DBInstance$Engine": "The name of the database engine to be used for this DB instance.
", "DBInstance$DBInstanceStatus": "Specifies the current state of this database.
For information about DB instance statuses, see DB Instance Status in the Amazon RDS User Guide.
", "DBInstance$MasterUsername": "Contains the master username for the DB instance.
", "DBInstance$DBName": "The meaning of this parameter differs according to the database engine you use.
MySQL, MariaDB, SQL Server, PostgreSQL
Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.
Type: String
Oracle
Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.
", @@ -3671,12 +3671,12 @@ "DBInstanceStatusInfo$StatusType": "This value is currently \"read replication.\"
", "DBInstanceStatusInfo$Status": "Status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated.
", "DBInstanceStatusInfo$Message": "Details of the error if there is an error for the instance. If the instance isn't in an error state, this value is blank.
", - "DBParameterGroup$DBParameterGroupName": "Provides the name of the DB parameter group.
", - "DBParameterGroup$DBParameterGroupFamily": "Provides the name of the DB parameter group family that this DB parameter group is compatible with.
", + "DBParameterGroup$DBParameterGroupName": "The name of the DB parameter group.
", + "DBParameterGroup$DBParameterGroupFamily": "The name of the DB parameter group family that this DB parameter group is compatible with.
", "DBParameterGroup$Description": "Provides the customer-specified description for this DB parameter group.
", "DBParameterGroup$DBParameterGroupArn": "The Amazon Resource Name (ARN) for the DB parameter group.
", "DBParameterGroupDetails$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
Provides the name of the DB parameter group.
", + "DBParameterGroupNameMessage$DBParameterGroupName": "The name of the DB parameter group.
", "DBParameterGroupStatus$DBParameterGroupName": "The name of the DB parameter group.
", "DBParameterGroupStatus$ParameterApplyStatus": "The status of parameter updates.
", "DBParameterGroupsMessage$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The identifier of the subnet.
", "Subnet$SubnetStatus": "The status of the subnet.
", "SubnetIdentifierList$member": null, - "Tag$Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", - "Tag$Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").
", + "Tag$Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").
", + "Tag$Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").
", "TargetHealth$Description": "A description of the health of the RDS Proxy target. If the State
is AVAILABLE
, a description is not included.
The name of the time zone.
", "UpgradeTarget$Engine": "The name of the upgrade target database engine.
", diff --git a/models/apis/resource-groups/2017-11-27/docs-2.json b/models/apis/resource-groups/2017-11-27/docs-2.json index b2fc39a45cc..d3394ee2a1e 100644 --- a/models/apis/resource-groups/2017-11-27/docs-2.json +++ b/models/apis/resource-groups/2017-11-27/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "AWS Resource Groups lets you organize AWS resources such as Amazon EC2 instances, Amazon Relational Database Service databases, and Amazon S3 buckets into groups using criteria that you define as tags. A resource group is a collection of resources that match the resource types specified in a query, and share one or more tags or portions of tags. You can create a group of resources based on their roles in your cloud infrastructure, lifecycle stages, regions, application layers, or virtually any criteria. Resource groups enable you to automate management tasks, such as those in AWS Systems Manager Automation documents, on tag-related resources in AWS Systems Manager. Groups of tagged resources also let you quickly view a custom console in AWS Systems Manager that shows AWS Config compliance and other monitoring data about member resources.
To create a resource group, build a resource query, and specify tags that identify the criteria that members of the group have in common. Tags are key-value pairs.
For more information about Resource Groups, see the AWS Resource Groups User Guide.
AWS Resource Groups uses a REST-compliant API that you can use to perform the following types of operations.
Create, Read, Update, and Delete (CRUD) operations on resource groups and resource query entities
Applying, editing, and removing tags from resource groups
Resolving resource group member ARNs so they can be returned as search results
Getting data about resources that are members of a group
Searching AWS resources based on a resource query
AWS Resource Groups lets you organize AWS resources such as Amazon EC2 instances, Amazon Relational Database Service databases, and Amazon S3 buckets into groups using criteria that you define as tags. A resource group is a collection of resources that match the resource types specified in a query, and share one or more tags or portions of tags. You can create a group of resources based on their roles in your cloud infrastructure, lifecycle stages, regions, application layers, or virtually any criteria. Resource Groups enable you to automate management tasks, such as those in AWS Systems Manager Automation documents, on tag-related resources in AWS Systems Manager. Groups of tagged resources also let you quickly view a custom console in AWS Systems Manager that shows AWS Config compliance and other monitoring data about member resources.
To create a resource group, build a resource query, and specify tags that identify the criteria that members of the group have in common. Tags are key-value pairs.
For more information about Resource Groups, see the AWS Resource Groups User Guide.
AWS Resource Groups uses a REST-compliant API that you can use to perform the following types of operations.
Create, Read, Update, and Delete (CRUD) operations on resource groups and resource query entities
Applying, editing, and removing tags from resource groups
Resolving resource group member ARNs so they can be returned as search results
Getting data about resources that are members of a group
Searching AWS resources based on a resource query
Creates a resource group with the specified name and description. You can optionally include a resource query, or a service configuration.
", "DeleteGroup": "Deletes the specified resource group. Deleting a resource group does not delete any resources that are members of the group; it only deletes the group structure.
", @@ -11,7 +11,7 @@ "GroupResources": "Adds the specified resources to the specified group.
", "ListGroupResources": "Returns a list of ARNs of the resources that are members of a specified resource group.
", "ListGroups": "Returns a list of existing resource groups in your account.
", - "SearchResources": "Returns a list of AWS resource identifiers that matches tne specified query. The query uses the same format as a resource query in a CreateGroup or UpdateGroupQuery operation.
", + "SearchResources": "Returns a list of AWS resource identifiers that matches the specified query. The query uses the same format as a resource query in a CreateGroup or UpdateGroupQuery operation.
", "Tag": "Adds tags to a resource group with the specified ARN. Existing tags on a resource group are not changed if they are not specified in the request parameters.
Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data.
Removes the specified resources from the specified group.
", "Untag": "Deletes tags from a specified resource group.
", @@ -403,7 +403,7 @@ "QueryType": { "base": null, "refs": { - "ResourceQuery$Type": "The type of the query. You can use the following values:
CLOUDFORMATION_STACK_1_0:
A JSON syntax that lets you specify a CloudFormation stack ARN.
TAG_FILTERS_1_0:
A JSON syntax that lets you specify a collection of simple tag filters for resource types and tags, as supported by the AWS Tagging API ResourceTypeFilters
parameter of the tagging:GetResources
operation. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.
For example, consider the following sample query for resources that have two tags, Stage
and Version
, with two values each:
[{\"Key\":\"Stage\",\"Values\":[\"Test\",\"Deploy\"]},{\"Key\":\"Version\",\"Values\":[\"1\",\"2\"]}]
The results of this query could include the following.
An EC2 instance that has the following two tags: {\"Key\":\"Stage\",\"Value\":\"Deploy\"}
, and {\"Key\":\"Version\",\"Value\":\"2\"}
An S3 bucket that has the following two tags: {\"Key\":\"Stage\",\"Value\":\"Test\"}
, and {\"Key\":\"Version\",\"Value\":\"1\"}
The query would not include the following items in the results, however.
An EC2 instance that has only the following tag: {\"Key\":\"Stage\",\"Value\":\"Deploy\"}
.
The instance does not have all of the tag keys specified in the filter, so it is excluded from the results.
An RDS database that has the following two tags: {\"Key\":\"Stage\",\"Value\":\"Archived\"}
, and {\"Key\":\"Version\",\"Value\":\"4\"}
The database has all of the tag keys, but none of those keys has an associated value that matches at least one of the specified values in the filter.
The type of the query. You can use the following values:
CLOUDFORMATION_STACK_1_0:
Specifies that the Query
contains an ARN for a CloudFormation stack.
TAG_FILTERS_1_0:
Specifies that the Query
parameter contains a JSON string that represents a collection of simple tag filters for resource types and tags. The JSON string uses a syntax similar to the GetResources
operation, but uses only the ResourceTypeFilters
and TagFilters
fields. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values.
For example, consider the following sample query for resources that have two tags, Stage
and Version
, with two values each:
[{\"Stage\":[\"Test\",\"Deploy\"]},{\"Version\":[\"1\",\"2\"]}]
The results of this query could include the following.
An EC2 instance that has the following two tags: {\"Stage\":\"Deploy\"}
, and {\"Version\":\"2\"}
An S3 bucket that has the following two tags: {\"Stage\":\"Test\"}
, and {\"Version\":\"1\"}
The query would not include the following items in the results, however.
An EC2 instance that has only the following tag: {\"Stage\":\"Deploy\"}
.
The instance does not have all of the tag keys specified in the filter, so it is excluded from the results.
An RDS database that has the following two tags: {\"Stage\":\"Archived\"}
and {\"Version\":\"4\"}
The database has all of the tag keys, but none of those keys has an associated value that matches at least one of the specified values in the filter.
The query that is used to define a resource group or a search for resources.
", + "base": "The query that is used to define a resource group or a search for resources. A query specifies both a query type and a query string as a JSON object. See the examples section for example JSON strings.
The examples that follow are shown as standard JSON strings. If you include such a string as a parameter to the AWS CLI or an SDK API, you might need to 'escape' the string into a single line. For example, see the Quoting strings in the AWS CLI User Guide.
Example 1
The following generic example shows a resource query JSON string that includes only resources that meet the following criteria:
The resource type must be either resource_type1
or resource_type2
.
The resource must have a tag Key1
with a value of either ValueA
or ValueB
.
The resource must have a tag Key2
with a value of either ValueC
or ValueD
.
{ \"Type\": \"TAG_FILTERS_1_0\", \"Query\": { \"ResourceTypeFilters\": [ \"resource_type1\", \"resource_type2\"], \"TagFilters\": [ { \"Key\": \"Key1\", \"Values\": [\"ValueA\",\"ValueB\"] }, { \"Key\":\"Key2\", \"Values\":[\"ValueC\",\"ValueD\"] } ] } }
This has the equivalent \"shortcut\" syntax of the following:
{ \"Type\": \"TAG_FILTERS_1_0\", \"Query\": { \"ResourceTypeFilters\": [ \"resource_type1\", \"resource_type2\"], \"TagFilters\": [ { \"Key1\": [\"ValueA\",\"ValueB\"] }, { \"Key2\": [\"ValueC\",\"ValueD\"] } ] } }
Example 2
The following example shows a resource query JSON string that includes only Amazon EC2 instances that are tagged Stage
with a value of Test
.
{ \"Type\": \"TAG_FILTERS_1_0\", \"Query\": \"{ \"ResourceTypeFilters\": \"AWS::EC2::Instance\", \"TagFilters\": { \"Stage\": \"Test\" } } }
Example 3
The following example shows a resource query JSON string that includes resources of any supported type as long as they are tagged Stage
with a value of Prod
.
{ \"Type\": \"TAG_FILTERS_1_0\", \"Query\": { \"ResourceTypeFilters\": \"AWS::AllSupported\", \"TagFilters\": { \"Stage\": \"Prod\" } } }
Example 4
The following example shows a resource query JSON string that includes only Amazon EC2 instances and Amazon S3 buckets that are part of the specified AWS CloudFormation stack.
{ \"Type\": \"CLOUDFORMATION_STACK_1_0\", \"Query\": { \"ResourceTypeFilters\": [ \"AWS::EC2::Instance\", \"AWS::S3::Bucket\" ], \"StackIdentifier\": \"arn:aws:cloudformation:us-west-2:123456789012:stack/AWStestuseraccount/fb0d5000-aba8-00e8-aa9e-50d5cEXAMPLE\" } }
The resource query that determines which AWS resources are members of this group.
You can specify either a ResourceQuery
or a Configuration
, but not both.
The resource query associated with the group.
", diff --git a/models/apis/resource-groups/2017-11-27/paginators-1.json b/models/apis/resource-groups/2017-11-27/paginators-1.json index 809603d71ab..b68c8171786 100644 --- a/models/apis/resource-groups/2017-11-27/paginators-1.json +++ b/models/apis/resource-groups/2017-11-27/paginators-1.json @@ -3,17 +3,20 @@ "ListGroupResources": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "ResourceIdentifiers" }, "ListGroups": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "GroupIdentifiers" }, "SearchResources": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "ResourceIdentifiers" } } } diff --git a/models/apis/resourcegroupstaggingapi/2017-01-26/docs-2.json b/models/apis/resourcegroupstaggingapi/2017-01-26/docs-2.json index 6acc7d54d1f..0e462efca24 100644 --- a/models/apis/resourcegroupstaggingapi/2017-01-26/docs-2.json +++ b/models/apis/resourcegroupstaggingapi/2017-01-26/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "This guide describes the API operations for the resource groups tagging.
A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.
Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data.
Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the following tasks:
Tag and untag supported resources located in the specified Region for the AWS account.
Use tag-based filters to search for resources located in the specified Region for the AWS account.
List all existing tag keys in the specified Region for the AWS account.
List all existing values for the specified key in the specified Region for the AWS account.
To use resource groups tagging API operations, you must add the following permissions to your IAM policy:
tag:GetResources
tag:TagResources
tag:UntagResources
tag:GetTagKeys
tag:GetTagValues
You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.
For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.
Services that support the Resource Groups Tagging API
You can use the Resource Groups Tagging API to tag resources for the following AWS services.
This guide describes the API operations for the resource groups tagging.
A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.
Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data.
Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the following tasks:
Tag and untag supported resources located in the specified Region for the AWS account.
Use tag-based filters to search for resources located in the specified Region for the AWS account.
List all existing tag keys in the specified Region for the AWS account.
List all existing values for the specified key in the specified Region for the AWS account.
To use resource groups tagging API operations, you must add the following permissions to your IAM policy:
tag:GetResources
tag:TagResources
tag:UntagResources
tag:GetTagKeys
tag:GetTagValues
You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.
For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.
Services that support the Resource Groups Tagging API
You can use the Resource Groups Tagging API to tag resources for the following AWS services.
Describes the status of the StartReportCreation
operation.
You can call this operation only from the organization's master account and from the us-east-1 Region.
", "GetComplianceSummary": "Returns a table that shows counts of resources that are noncompliant with their tag policies.
For more information on tag policies, see Tag Policies in the AWS Organizations User Guide.
You can call this operation only from the organization's master account and from the us-east-1 Region.
", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 73f710b9f02..4c595a694c2 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2067,6 +2067,7 @@ "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -5351,6 +5352,12 @@ }, "hostname" : "snowball-fips.ap-northeast-2.amazonaws.com" }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "snowball-fips.ap-northeast-3.amazonaws.com" + }, "fips-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" diff --git a/service/cloudwatchevents/api.go b/service/cloudwatchevents/api.go index 570b5a51be3..66aa4bf0cec 100644 --- a/service/cloudwatchevents/api.go +++ b/service/cloudwatchevents/api.go @@ -2419,6 +2419,8 @@ func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *reque // // * Amazon API Gateway REST APIs // +// * Redshift Clusters to invoke Data API ExecuteStatement on +// // Creating rules with built-in targets is supported only in the AWS Management // Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances // API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. @@ -7006,6 +7008,114 @@ func (s *PutTargetsResultEntry) SetTargetId(v string) *PutTargetsResultEntry { return s } +// These are custom parameters to be used when the target is a Redshift cluster +// to invoke the Redshift Data API ExecuteStatement based on EventBridge events. +type RedshiftDataParameters struct { + _ struct{} `type:"structure"` + + // The name of the database. Required when authenticating using temporary credentials. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // The database user name. Required when authenticating using temporary credentials. 
+ DbUser *string `min:"1" type:"string"` + + // The name or ARN of the secret that enables access to the database. Required + // when authenticating using AWS Secrets Manager. + SecretManagerArn *string `min:"1" type:"string"` + + // The SQL statement text to run. + // + // Sql is a required field + Sql *string `min:"1" type:"string" required:"true"` + + // The name of the SQL statement. You can name the SQL statement when you create + // it to identify the query. + StatementName *string `min:"1" type:"string"` + + // Indicates whether to send an event back to EventBridge after the SQL statement + // runs. + WithEvent *bool `type:"boolean"` +} + +// String returns the string representation +func (s RedshiftDataParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDataParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftDataParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDataParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.DbUser != nil && len(*s.DbUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DbUser", 1)) + } + if s.SecretManagerArn != nil && len(*s.SecretManagerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretManagerArn", 1)) + } + if s.Sql == nil { + invalidParams.Add(request.NewErrParamRequired("Sql")) + } + if s.Sql != nil && len(*s.Sql) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Sql", 1)) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database 
field's value. +func (s *RedshiftDataParameters) SetDatabase(v string) *RedshiftDataParameters { + s.Database = &v + return s +} + +// SetDbUser sets the DbUser field's value. +func (s *RedshiftDataParameters) SetDbUser(v string) *RedshiftDataParameters { + s.DbUser = &v + return s +} + +// SetSecretManagerArn sets the SecretManagerArn field's value. +func (s *RedshiftDataParameters) SetSecretManagerArn(v string) *RedshiftDataParameters { + s.SecretManagerArn = &v + return s +} + +// SetSql sets the Sql field's value. +func (s *RedshiftDataParameters) SetSql(v string) *RedshiftDataParameters { + s.Sql = &v + return s +} + +// SetStatementName sets the StatementName field's value. +func (s *RedshiftDataParameters) SetStatementName(v string) *RedshiftDataParameters { + s.StatementName = &v + return s +} + +// SetWithEvent sets the WithEvent field's value. +func (s *RedshiftDataParameters) SetWithEvent(v bool) *RedshiftDataParameters { + s.WithEvent = &v + return s +} + type RemovePermissionInput struct { _ struct{} `type:"structure"` @@ -7784,6 +7894,14 @@ type Target struct { // default is to use the eventId as the partition key. KinesisParameters *KinesisParameters `type:"structure"` + // Contains the Redshift Data API parameters to use when the target is a Redshift + // cluster. + // + // If you specify a Redshift Cluster as a Target, you can use this to specify + // parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge + // events. + RedshiftDataParameters *RedshiftDataParameters `type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role to be used for this target // when the rule is triggered. If one rule triggers multiple targets, you can // use a different IAM role for each target. 
@@ -7847,6 +7965,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("KinesisParameters", err.(request.ErrInvalidParams)) } } + if s.RedshiftDataParameters != nil { + if err := s.RedshiftDataParameters.Validate(); err != nil { + invalidParams.AddNested("RedshiftDataParameters", err.(request.ErrInvalidParams)) + } + } if s.RunCommandParameters != nil { if err := s.RunCommandParameters.Validate(); err != nil { invalidParams.AddNested("RunCommandParameters", err.(request.ErrInvalidParams)) @@ -7913,6 +8036,12 @@ func (s *Target) SetKinesisParameters(v *KinesisParameters) *Target { return s } +// SetRedshiftDataParameters sets the RedshiftDataParameters field's value. +func (s *Target) SetRedshiftDataParameters(v *RedshiftDataParameters) *Target { + s.RedshiftDataParameters = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *Target) SetRoleArn(v string) *Target { s.RoleArn = &v diff --git a/service/eventbridge/api.go b/service/eventbridge/api.go index b34d8dec06c..3f93ecf0b48 100644 --- a/service/eventbridge/api.go +++ b/service/eventbridge/api.go @@ -2419,6 +2419,8 @@ func (c *EventBridge) PutTargetsRequest(input *PutTargetsInput) (req *request.Re // // * Amazon API Gateway REST APIs // +// * Redshift Clusters to invoke Data API ExecuteStatement on +// // Creating rules with built-in targets is supported only in the AWS Management // Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances // API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. @@ -7006,6 +7008,114 @@ func (s *PutTargetsResultEntry) SetTargetId(v string) *PutTargetsResultEntry { return s } +// These are custom parameters to be used when the target is a Redshift cluster +// to invoke the Redshift Data API ExecuteStatement based on EventBridge events. +type RedshiftDataParameters struct { + _ struct{} `type:"structure"` + + // The name of the database. Required when authenticating using temporary credentials. 
+ // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // The database user name. Required when authenticating using temporary credentials. + DbUser *string `min:"1" type:"string"` + + // The name or ARN of the secret that enables access to the database. Required + // when authenticating using AWS Secrets Manager. + SecretManagerArn *string `min:"1" type:"string"` + + // The SQL statement text to run. + // + // Sql is a required field + Sql *string `min:"1" type:"string" required:"true"` + + // The name of the SQL statement. You can name the SQL statement when you create + // it to identify the query. + StatementName *string `min:"1" type:"string"` + + // Indicates whether to send an event back to EventBridge after the SQL statement + // runs. + WithEvent *bool `type:"boolean"` +} + +// String returns the string representation +func (s RedshiftDataParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDataParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RedshiftDataParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDataParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.DbUser != nil && len(*s.DbUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DbUser", 1)) + } + if s.SecretManagerArn != nil && len(*s.SecretManagerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretManagerArn", 1)) + } + if s.Sql == nil { + invalidParams.Add(request.NewErrParamRequired("Sql")) + } + if s.Sql != nil && len(*s.Sql) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Sql", 1)) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *RedshiftDataParameters) SetDatabase(v string) *RedshiftDataParameters { + s.Database = &v + return s +} + +// SetDbUser sets the DbUser field's value. +func (s *RedshiftDataParameters) SetDbUser(v string) *RedshiftDataParameters { + s.DbUser = &v + return s +} + +// SetSecretManagerArn sets the SecretManagerArn field's value. +func (s *RedshiftDataParameters) SetSecretManagerArn(v string) *RedshiftDataParameters { + s.SecretManagerArn = &v + return s +} + +// SetSql sets the Sql field's value. +func (s *RedshiftDataParameters) SetSql(v string) *RedshiftDataParameters { + s.Sql = &v + return s +} + +// SetStatementName sets the StatementName field's value. +func (s *RedshiftDataParameters) SetStatementName(v string) *RedshiftDataParameters { + s.StatementName = &v + return s +} + +// SetWithEvent sets the WithEvent field's value. 
+func (s *RedshiftDataParameters) SetWithEvent(v bool) *RedshiftDataParameters { + s.WithEvent = &v + return s +} + type RemovePermissionInput struct { _ struct{} `type:"structure"` @@ -7784,6 +7894,14 @@ type Target struct { // default is to use the eventId as the partition key. KinesisParameters *KinesisParameters `type:"structure"` + // Contains the Redshift Data API parameters to use when the target is a Redshift + // cluster. + // + // If you specify a Redshift Cluster as a Target, you can use this to specify + // parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge + // events. + RedshiftDataParameters *RedshiftDataParameters `type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role to be used for this target // when the rule is triggered. If one rule triggers multiple targets, you can // use a different IAM role for each target. @@ -7847,6 +7965,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("KinesisParameters", err.(request.ErrInvalidParams)) } } + if s.RedshiftDataParameters != nil { + if err := s.RedshiftDataParameters.Validate(); err != nil { + invalidParams.AddNested("RedshiftDataParameters", err.(request.ErrInvalidParams)) + } + } if s.RunCommandParameters != nil { if err := s.RunCommandParameters.Validate(); err != nil { invalidParams.AddNested("RunCommandParameters", err.(request.ErrInvalidParams)) @@ -7913,6 +8036,12 @@ func (s *Target) SetKinesisParameters(v *KinesisParameters) *Target { return s } +// SetRedshiftDataParameters sets the RedshiftDataParameters field's value. +func (s *Target) SetRedshiftDataParameters(v *RedshiftDataParameters) *Target { + s.RedshiftDataParameters = v + return s +} + // SetRoleArn sets the RoleArn field's value. 
func (s *Target) SetRoleArn(v string) *Target { s.RoleArn = &v diff --git a/service/glue/api.go b/service/glue/api.go index fe7e6ef16dc..d99a18fd723 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -1081,6 +1081,97 @@ func (c *Glue) BatchStopJobRunWithContext(ctx aws.Context, input *BatchStopJobRu return out, req.Send() } +const opBatchUpdatePartition = "BatchUpdatePartition" + +// BatchUpdatePartitionRequest generates a "aws/request.Request" representing the +// client's request for the BatchUpdatePartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchUpdatePartition for more information on using the BatchUpdatePartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchUpdatePartitionRequest method. +// req, resp := client.BatchUpdatePartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchUpdatePartition +func (c *Glue) BatchUpdatePartitionRequest(input *BatchUpdatePartitionInput) (req *request.Request, output *BatchUpdatePartitionOutput) { + op := &request.Operation{ + Name: opBatchUpdatePartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchUpdatePartitionInput{} + } + + output = &BatchUpdatePartitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchUpdatePartition API operation for AWS Glue. +// +// Updates one or more partitions in a batch operation. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation BatchUpdatePartition for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input provided was not valid. +// +// * EntityNotFoundException +// A specified entity does not exist +// +// * OperationTimeoutException +// The operation timed out. +// +// * InternalServiceException +// An internal service error occurred. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchUpdatePartition +func (c *Glue) BatchUpdatePartition(input *BatchUpdatePartitionInput) (*BatchUpdatePartitionOutput, error) { + req, out := c.BatchUpdatePartitionRequest(input) + return out, req.Send() +} + +// BatchUpdatePartitionWithContext is the same as BatchUpdatePartition with the addition of +// the ability to pass a context and additional request options. +// +// See BatchUpdatePartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) BatchUpdatePartitionWithContext(ctx aws.Context, input *BatchUpdatePartitionInput, opts ...request.Option) (*BatchUpdatePartitionOutput, error) { + req, out := c.BatchUpdatePartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCancelMLTaskRun = "CancelMLTaskRun" // CancelMLTaskRunRequest generates a "aws/request.Request" representing the @@ -15052,6 +15143,219 @@ func (s *BatchStopJobRunSuccessfulSubmission) SetJobRunId(v string) *BatchStopJo return s } +// Contains information about a batch update partition error. +type BatchUpdatePartitionFailureEntry struct { + _ struct{} `type:"structure"` + + // The details about the batch update partition error. + ErrorDetail *ErrorDetail `type:"structure"` + + // A list of values defining the partitions. + PartitionValueList []*string `type:"list"` +} + +// String returns the string representation +func (s BatchUpdatePartitionFailureEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionFailureEntry) GoString() string { + return s.String() +} + +// SetErrorDetail sets the ErrorDetail field's value. +func (s *BatchUpdatePartitionFailureEntry) SetErrorDetail(v *ErrorDetail) *BatchUpdatePartitionFailureEntry { + s.ErrorDetail = v + return s +} + +// SetPartitionValueList sets the PartitionValueList field's value. +func (s *BatchUpdatePartitionFailureEntry) SetPartitionValueList(v []*string) *BatchUpdatePartitionFailureEntry { + s.PartitionValueList = v + return s +} + +type BatchUpdatePartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the catalog in which the partition is to be updated. Currently, + // this should be the AWS account ID. + CatalogId *string `min:"1" type:"string"` + + // The name of the metadata database in which the partition is to be updated. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of up to 100 BatchUpdatePartitionRequestEntry objects to update. 
+ // + // Entries is a required field + Entries []*BatchUpdatePartitionRequestEntry `min:"1" type:"list" required:"true"` + + // The name of the metadata table in which the partition is to be updated. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchUpdatePartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchUpdatePartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchUpdatePartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil && len(s.Entries) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Entries", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. 
+func (s *BatchUpdatePartitionInput) SetCatalogId(v string) *BatchUpdatePartitionInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *BatchUpdatePartitionInput) SetDatabaseName(v string) *BatchUpdatePartitionInput { + s.DatabaseName = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *BatchUpdatePartitionInput) SetEntries(v []*BatchUpdatePartitionRequestEntry) *BatchUpdatePartitionInput { + s.Entries = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchUpdatePartitionInput) SetTableName(v string) *BatchUpdatePartitionInput { + s.TableName = &v + return s +} + +type BatchUpdatePartitionOutput struct { + _ struct{} `type:"structure"` + + // The errors encountered when trying to update the requested partitions. A + // list of BatchUpdatePartitionFailureEntry objects. + Errors []*BatchUpdatePartitionFailureEntry `type:"list"` +} + +// String returns the string representation +func (s BatchUpdatePartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchUpdatePartitionOutput) SetErrors(v []*BatchUpdatePartitionFailureEntry) *BatchUpdatePartitionOutput { + s.Errors = v + return s +} + +// A structure that contains the values and structure used to update a partition. +type BatchUpdatePartitionRequestEntry struct { + _ struct{} `type:"structure"` + + // The structure used to update a partition. + // + // PartitionInput is a required field + PartitionInput *PartitionInput `type:"structure" required:"true"` + + // A list of values defining the partitions. 
+ // + // PartitionValueList is a required field + PartitionValueList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchUpdatePartitionRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionRequestEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchUpdatePartitionRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchUpdatePartitionRequestEntry"} + if s.PartitionInput == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionInput")) + } + if s.PartitionValueList == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValueList")) + } + if s.PartitionInput != nil { + if err := s.PartitionInput.Validate(); err != nil { + invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPartitionInput sets the PartitionInput field's value. +func (s *BatchUpdatePartitionRequestEntry) SetPartitionInput(v *PartitionInput) *BatchUpdatePartitionRequestEntry { + s.PartitionInput = v + return s +} + +// SetPartitionValueList sets the PartitionValueList field's value. +func (s *BatchUpdatePartitionRequestEntry) SetPartitionValueList(v []*string) *BatchUpdatePartitionRequestEntry { + s.PartitionValueList = v + return s +} + // Defines a binary column statistics data. type BinaryColumnStatisticsData struct { _ struct{} `type:"structure"` @@ -16548,6 +16852,16 @@ type Connection struct { // * KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs // that are the addresses of the Apache Kafka brokers in a Kafka cluster // to which a Kafka client will connect to and bootstrap itself. 
+ // + // * KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka + // connection. Default value is "true". + // + // * KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem + // format). The default is an empty string. + // + // * KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of + // the CA cert file or not. AWS Glue validates for three algorithms: SHA256withRSA, + // SHA384withRSA and SHA512withRSA. Default value is "false". ConnectionProperties map[string]*string `type:"map"` // The type of the connection. Currently, SFTP is not supported. @@ -16658,6 +16972,9 @@ type ConnectionInput struct { // // * MONGODB - Designates a connection to a MongoDB document database. // + // * NETWORK - Designates a network connection to a data source within an + // Amazon Virtual Private Cloud environment (Amazon VPC). + // // SFTP is not supported. // // ConnectionType is a required field diff --git a/service/glue/glueiface/interface.go b/service/glue/glueiface/interface.go index 58f1437d84d..c8f5716a9af 100644 --- a/service/glue/glueiface/interface.go +++ b/service/glue/glueiface/interface.go @@ -108,6 +108,10 @@ type GlueAPI interface { BatchStopJobRunWithContext(aws.Context, *glue.BatchStopJobRunInput, ...request.Option) (*glue.BatchStopJobRunOutput, error) BatchStopJobRunRequest(*glue.BatchStopJobRunInput) (*request.Request, *glue.BatchStopJobRunOutput) + BatchUpdatePartition(*glue.BatchUpdatePartitionInput) (*glue.BatchUpdatePartitionOutput, error) + BatchUpdatePartitionWithContext(aws.Context, *glue.BatchUpdatePartitionInput, ...request.Option) (*glue.BatchUpdatePartitionOutput, error) + BatchUpdatePartitionRequest(*glue.BatchUpdatePartitionInput) (*request.Request, *glue.BatchUpdatePartitionOutput) + CancelMLTaskRun(*glue.CancelMLTaskRunInput) (*glue.CancelMLTaskRunOutput, error) CancelMLTaskRunWithContext(aws.Context, *glue.CancelMLTaskRunInput, ...request.Option) (*glue.CancelMLTaskRunOutput, error) 
CancelMLTaskRunRequest(*glue.CancelMLTaskRunInput) (*request.Request, *glue.CancelMLTaskRunOutput) diff --git a/service/iotsitewise/api.go b/service/iotsitewise/api.go index d00c2f91a5c..a6500d4c8cb 100644 --- a/service/iotsitewise/api.go +++ b/service/iotsitewise/api.go @@ -509,9 +509,9 @@ func (c *IoTSiteWise) CreateAccessPolicyRequest(input *CreateAccessPolicyInput) // CreateAccessPolicy API operation for AWS IoT SiteWise. // -// Creates an access policy that grants the specified AWS Single Sign-On user -// or group access to the specified AWS IoT SiteWise Monitor portal or project -// resource. +// Creates an access policy that grants the specified identity (AWS SSO user, +// AWS SSO group, or IAM user) access to the specified AWS IoT SiteWise Monitor +// portal or project resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1055,15 +1055,13 @@ func (c *IoTSiteWise) CreatePortalRequest(input *CreatePortalInput) (req *reques // CreatePortal API operation for AWS IoT SiteWise. // -// Creates a portal, which can contain projects and dashboards. Before you can -// create a portal, you must enable AWS Single Sign-On. AWS IoT SiteWise Monitor -// uses AWS SSO to manage user permissions. For more information, see Enabling -// AWS SSO (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) -// in the AWS IoT SiteWise User Guide. +// Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise +// Monitor uses AWS SSO or IAM to authenticate portal users and manage user +// permissions. // -// Before you can sign in to a new portal, you must add at least one AWS SSO -// user or group to that portal. 
For more information, see Adding or removing -// portal administrators (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/administer-portals.html#portal-change-admins) +// Before you can sign in to a new portal, you must add at least one identity +// to that portal. For more information, see Adding or removing portal administrators +// (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/administer-portals.html#portal-change-admins) // in the AWS IoT SiteWise User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1122,6 +1120,103 @@ func (c *IoTSiteWise) CreatePortalWithContext(ctx aws.Context, input *CreatePort return out, req.Send() } +const opCreatePresignedPortalUrl = "CreatePresignedPortalUrl" + +// CreatePresignedPortalUrlRequest generates a "aws/request.Request" representing the +// client's request for the CreatePresignedPortalUrl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePresignedPortalUrl for more information on using the CreatePresignedPortalUrl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePresignedPortalUrlRequest method. 
+// req, resp := client.CreatePresignedPortalUrlRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/CreatePresignedPortalUrl +func (c *IoTSiteWise) CreatePresignedPortalUrlRequest(input *CreatePresignedPortalUrlInput) (req *request.Request, output *CreatePresignedPortalUrlOutput) { + op := &request.Operation{ + Name: opCreatePresignedPortalUrl, + HTTPMethod: "GET", + HTTPPath: "/portals/{portalId}/presigned-url", + } + + if input == nil { + input = &CreatePresignedPortalUrlInput{} + } + + output = &CreatePresignedPortalUrlOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("monitor.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreatePresignedPortalUrl API operation for AWS IoT SiteWise. +// +// Creates a pre-signed URL to a portal. Use this operation to create URLs to +// portals that use AWS Identity and Access Management (IAM) to authenticate +// users. An IAM user with access to a portal can call this API to get a URL +// to that portal. The URL contains a session token that lets the IAM user access +// the portal. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation CreatePresignedPortalUrl for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// AWS IoT SiteWise can't process your request right now. Try again later. 
+// +// * ThrottlingException +// Your request exceeded a rate limit. For example, you might have exceeded +// the number of AWS IoT SiteWise assets that can be created per second, the +// allowed number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the AWS IoT SiteWise User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/CreatePresignedPortalUrl +func (c *IoTSiteWise) CreatePresignedPortalUrl(input *CreatePresignedPortalUrlInput) (*CreatePresignedPortalUrlOutput, error) { + req, out := c.CreatePresignedPortalUrlRequest(input) + return out, req.Send() +} + +// CreatePresignedPortalUrlWithContext is the same as CreatePresignedPortalUrl with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePresignedPortalUrl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTSiteWise) CreatePresignedPortalUrlWithContext(ctx aws.Context, input *CreatePresignedPortalUrlInput, opts ...request.Option) (*CreatePresignedPortalUrlOutput, error) { + req, out := c.CreatePresignedPortalUrlRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateProject = "CreateProject" // CreateProjectRequest generates a "aws/request.Request" representing the @@ -1273,9 +1368,9 @@ func (c *IoTSiteWise) DeleteAccessPolicyRequest(input *DeleteAccessPolicyInput) // DeleteAccessPolicy API operation for AWS IoT SiteWise. // -// Deletes an access policy that grants the specified AWS Single Sign-On identity -// access to the specified AWS IoT SiteWise Monitor resource. 
You can use this -// operation to revoke access to an AWS IoT SiteWise Monitor resource. +// Deletes an access policy that grants the specified identity access to the +// specified AWS IoT SiteWise Monitor resource. You can use this operation to +// revoke access to an AWS IoT SiteWise Monitor resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1680,9 +1775,7 @@ func (c *IoTSiteWise) DeleteGatewayRequest(input *DeleteGatewayInput) (req *requ // DeleteGateway API operation for AWS IoT SiteWise. // // Deletes a gateway from AWS IoT SiteWise. When you delete a gateway, some -// of the gateway's files remain in your gateway's file system. For more information, -// see Data retention (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/data-retention.html) -// in the AWS IoT SiteWise User Guide. +// of the gateway's files remain in your gateway's file system. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1975,8 +2068,8 @@ func (c *IoTSiteWise) DescribeAccessPolicyRequest(input *DescribeAccessPolicyInp // DescribeAccessPolicy API operation for AWS IoT SiteWise. // -// Describes an access policy, which specifies an AWS SSO user or group's access -// to an AWS IoT SiteWise Monitor portal or project. +// Describes an access policy, which specifies an identity's access to an AWS +// IoT SiteWise Monitor portal or project. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3497,8 +3590,9 @@ func (c *IoTSiteWise) ListAccessPoliciesRequest(input *ListAccessPoliciesInput) // ListAccessPolicies API operation for AWS IoT SiteWise. 
// -// Retrieves a paginated list of access policies for an AWS SSO identity (a -// user or group) or an AWS IoT SiteWise Monitor resource (a portal or project). +// Retrieves a paginated list of access policies for an identity (an AWS SSO +// user, an AWS SSO group, or an IAM user) or an AWS IoT SiteWise Monitor resource +// (a portal or project). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5269,8 +5363,8 @@ func (c *IoTSiteWise) UpdateAccessPolicyRequest(input *UpdateAccessPolicyInput) // UpdateAccessPolicy API operation for AWS IoT SiteWise. // -// Updates an existing access policy that specifies an AWS SSO user or group's -// access to an AWS IoT SiteWise Monitor portal or project resource. +// Updates an existing access policy that specifies an identity's access to +// an AWS IoT SiteWise Monitor portal or project resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5482,11 +5576,10 @@ func (c *IoTSiteWise) UpdateAssetModelRequest(input *UpdateAssetModelInput) (req // their IDs and definitions in the updated asset model payload. For more information, // see DescribeAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeAssetModel.html). // -// If you remove a property from an asset model or update a property's formula -// expression, AWS IoT SiteWise deletes all previous data for that property. -// If you remove a hierarchy definition from an asset model, AWS IoT SiteWise -// disassociates every asset associated with that hierarchy. You can't change -// the type or data type of an existing property. +// If you remove a property from an asset model, AWS IoT SiteWise deletes all +// previous data for that property. 
If you remove a hierarchy definition from +// an asset model, AWS IoT SiteWise disassociates every asset associated with +// that hierarchy. You can't change the type or data type of an existing property. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6165,8 +6258,8 @@ func (c *IoTSiteWise) UpdateProjectWithContext(ctx aws.Context, input *UpdatePro return out, req.Send() } -// Contains an access policy that defines an AWS SSO identity's access to an -// AWS IoT SiteWise Monitor resource. +// Contains an access policy that defines an identity's access to an AWS IoT +// SiteWise Monitor resource. type AccessPolicySummary struct { _ struct{} `type:"structure"` @@ -6178,7 +6271,7 @@ type AccessPolicySummary struct { // Id is a required field Id *string `locationName:"id" min:"36" type:"string" required:"true"` - // The AWS SSO identity (a user or group). + // The identity (an AWS SSO user, an AWS SSO group, or an IAM user). // // Identity is a required field Identity *Identity `locationName:"identity" type:"structure" required:"true"` @@ -6417,7 +6510,7 @@ type AssetHierarchy struct { // The hierarchy name provided in the CreateAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) // or UpdateAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) - // API. + // API operation. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -6462,7 +6555,7 @@ type AssetModelHierarchy struct { // The name of the asset model hierarchy that you specify by using the CreateAssetModel // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) // or UpdateAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) - // API. + // API operation. 
// // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -6532,9 +6625,10 @@ type AssetModelHierarchyDefinition struct { // ChildAssetModelId is a required field ChildAssetModelId *string `locationName:"childAssetModelId" min:"36" type:"string" required:"true"` - // The name of the asset model hierarchy definition (as specified in CreateAssetModel + // The name of the asset model hierarchy definition (as specified in the CreateAssetModel // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) - // or UpdateAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html)). + // or UpdateAssetModel (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) + // API operation). // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -7878,8 +7972,8 @@ func (s *ConflictingOperationException) RequestID() string { type CreateAccessPolicyInput struct { _ struct{} `type:"structure"` - // The identity for this access policy. Choose either a user or a group but - // not both. + // The identity for this access policy. Choose an AWS SSO user, an AWS SSO group, + // or an IAM user. // // AccessPolicyIdentity is a required field AccessPolicyIdentity *Identity `locationName:"accessPolicyIdentity" type:"structure" required:"true"` @@ -7891,7 +7985,7 @@ type CreateAccessPolicyInput struct { AccessPolicyPermission *string `locationName:"accessPolicyPermission" type:"string" required:"true" enum:"Permission"` // The AWS IoT SiteWise Monitor resource for this access policy. Choose either - // portal or project but not both. + // a portal or a project. // // AccessPolicyResource is a required field AccessPolicyResource *Resource `locationName:"accessPolicyResource" type:"structure" required:"true"` @@ -8618,6 +8712,26 @@ type CreatePortalInput struct { // is required. 
ClientToken *string `locationName:"clientToken" min:"36" type:"string" idempotencyToken:"true"` + // The service to use to authenticate users to the portal. Choose from the following + // options: + // + // * SSO – The portal uses AWS Single Sign-On to authenticate users and + // manage user permissions. Before you can create a portal that uses AWS + // SSO, you must enable AWS SSO. For more information, see Enabling AWS SSO + // (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) + // in the AWS IoT SiteWise User Guide. This option is only available in AWS + // Regions other than the China Regions. + // + // * IAM – The portal uses AWS Identity and Access Management (IAM) to + // authenticate users and manage user permissions. IAM users must have the + // iotsitewise:CreatePresignedPortalUrl permission to sign in to the portal. + // This option is only available in the China Regions. + // + // You can't change this value after you create a portal. + // + // Default: SSO + PortalAuthMode *string `locationName:"portalAuthMode" type:"string" enum:"AuthMode"` + // The AWS administrator's contact email address. // // PortalContactEmail is a required field @@ -8708,6 +8822,12 @@ func (s *CreatePortalInput) SetClientToken(v string) *CreatePortalInput { return s } +// SetPortalAuthMode sets the PortalAuthMode field's value. +func (s *CreatePortalInput) SetPortalAuthMode(v string) *CreatePortalInput { + s.PortalAuthMode = &v + return s +} + // SetPortalContactEmail sets the PortalContactEmail field's value. func (s *CreatePortalInput) SetPortalContactEmail(v string) *CreatePortalInput { s.PortalContactEmail = &v @@ -8760,7 +8880,11 @@ type CreatePortalOutput struct { // PortalId is a required field PortalId *string `locationName:"portalId" min:"36" type:"string" required:"true"` - // The public URL for the AWS IoT SiteWise Monitor portal. + // The URL for the AWS IoT SiteWise Monitor portal. 
You can use this URL to + access portals that use AWS SSO for authentication. For portals that use + IAM for authentication, you must use the CreatePresignedPortalUrl + (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreatePresignedPortalUrl.html) + operation to create a URL that you can use to access the portal. // // PortalStartUrl is a required field PortalStartUrl *string `locationName:"portalStartUrl" min:"1" type:"string" required:"true"` @@ -8771,7 +8895,7 @@ type CreatePortalOutput struct { // PortalStatus is a required field PortalStatus *PortalStatus `locationName:"portalStatus" type:"structure" required:"true"` - // The associated AWS SSO application Id. + // The associated AWS SSO application ID, if the portal uses AWS SSO. // // SsoApplicationId is a required field SsoApplicationId *string `locationName:"ssoApplicationId" min:"1" type:"string" required:"true"` @@ -8817,6 +8941,89 @@ func (s *CreatePortalOutput) SetSsoApplicationId(v string) *CreatePortalOutput { return s } +type CreatePresignedPortalUrlInput struct { + _ struct{} `type:"structure"` + + // The ID of the portal to access. + // + // PortalId is a required field + PortalId *string `location:"uri" locationName:"portalId" min:"36" type:"string" required:"true"` + + // The duration (in seconds) for which the session at the URL is valid. + // + // Default: 900 seconds (15 minutes) + SessionDurationSeconds *int64 `location:"querystring" locationName:"sessionDurationSeconds" min:"900" type:"integer"` +} + +// String returns the string representation +func (s CreatePresignedPortalUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePresignedPortalUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreatePresignedPortalUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePresignedPortalUrlInput"} + if s.PortalId == nil { + invalidParams.Add(request.NewErrParamRequired("PortalId")) + } + if s.PortalId != nil && len(*s.PortalId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("PortalId", 36)) + } + if s.SessionDurationSeconds != nil && *s.SessionDurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("SessionDurationSeconds", 900)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPortalId sets the PortalId field's value. +func (s *CreatePresignedPortalUrlInput) SetPortalId(v string) *CreatePresignedPortalUrlInput { + s.PortalId = &v + return s +} + +// SetSessionDurationSeconds sets the SessionDurationSeconds field's value. +func (s *CreatePresignedPortalUrlInput) SetSessionDurationSeconds(v int64) *CreatePresignedPortalUrlInput { + s.SessionDurationSeconds = &v + return s +} + +type CreatePresignedPortalUrlOutput struct { + _ struct{} `type:"structure"` + + // The pre-signed URL to the portal. The URL contains the portal ID and a session + // token that lets you access the portal. The URL has the following format. + // + // https://