From d61b5525f9d21d98bb3a0853910e6d93c928200d Mon Sep 17 00:00:00 2001
From: Jared Baker
Date: Mon, 9 Jan 2023 16:55:42 -0500
Subject: [PATCH 1/5] r/aws_rds_export_task: resource implementation

---
 internal/service/rds/export_task.go | 333 ++++++++++++++++++++++++++++
 1 file changed, 333 insertions(+)
 create mode 100644 internal/service/rds/export_task.go

diff --git a/internal/service/rds/export_task.go b/internal/service/rds/export_task.go
new file mode 100644
index 000000000000..44875512af14
--- /dev/null
+++ b/internal/service/rds/export_task.go
@@ -0,0 +1,333 @@
+package rds
+
+import (
+  "context"
+  "errors"
+  "fmt"
+  "time"
+
+  "github.com/aws/aws-sdk-go-v2/aws"
+  "github.com/aws/aws-sdk-go-v2/service/rds"
+  awstypes "github.com/aws/aws-sdk-go-v2/service/rds/types"
+  "github.com/hashicorp/terraform-plugin-framework/path"
+  "github.com/hashicorp/terraform-plugin-framework/resource"
+  "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+  "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
+  "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+  "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+  "github.com/hashicorp/terraform-plugin-framework/types"
+  "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+  sdkv2resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+  "github.com/hashicorp/terraform-provider-aws/internal/create"
+  "github.com/hashicorp/terraform-provider-aws/internal/flex"
+  "github.com/hashicorp/terraform-provider-aws/internal/framework"
+  "github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+  "github.com/hashicorp/terraform-provider-aws/names"
+)
+
+func init() {
+  _sp.registerFrameworkResourceFactory(newResourceExportTask)
+}
+
+func newResourceExportTask(_ context.Context) (resource.ResourceWithConfigure, error) {
+  return &resourceExportTask{}, nil
+}
+
+const (
+  ResNameExportTask = "ExportTask"
+)
+
+type resourceExportTask struct {
+  framework.ResourceWithConfigure
+}
+
+func (r *resourceExportTask) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+  response.TypeName = "aws_rds_export_task"
+}
+
+func (r *resourceExportTask) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+  resp.Schema = schema.Schema{
+    Attributes: map[string]schema.Attribute{
+      "export_only": schema.ListAttribute{
+        Optional:    true,
+        ElementType: types.StringType,
+        PlanModifiers: []planmodifier.List{
+          listplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "export_task_identifier": schema.StringAttribute{
+        Required: true,
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "failure_cause": schema.StringAttribute{
+        Computed: true,
+      },
+      "iam_role_arn": schema.StringAttribute{
+        Required: true,
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "id": framework.IDAttribute(),
+      "kms_key_id": schema.StringAttribute{
+        Required: true,
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "percent_progress": schema.Int64Attribute{
+        Computed: true,
+      },
+      "s3_bucket_name": schema.StringAttribute{
+        Required: true,
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "s3_prefix": schema.StringAttribute{
+        Optional: true,
+        Computed: true, // This attribute can be returned by the Describe API even if unset
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+          stringplanmodifier.UseStateForUnknown(),
+        },
+      },
+      "snapshot_time": schema.StringAttribute{
+        Computed: true,
+      },
+      "source_arn": schema.StringAttribute{
+        Required: true,
+        PlanModifiers: []planmodifier.String{
+          stringplanmodifier.RequiresReplaceIfConfigured(),
+        },
+      },
+      "source_type": schema.StringAttribute{
+        Computed: true,
+      },
+      "status": schema.StringAttribute{
+        Computed: true,
+      },
+      "task_end_time": schema.StringAttribute{
+        Computed: true,
+      },
+      "task_start_time": schema.StringAttribute{
+        Computed: true,
+      },
+      "warning_message": schema.StringAttribute{
+        Computed: true,
+      },
+    },
+  }
+}
+
+func (r *resourceExportTask) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+  conn := r.Meta().RDSClient()
+
+  var plan resourceExportTaskData
+  resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+  if resp.Diagnostics.HasError() {
+    return
+  }
+
+  in := rds.StartExportTaskInput{
+    ExportTaskIdentifier: aws.String(plan.ExportTaskIdentifier.ValueString()),
+    IamRoleArn:           aws.String(plan.IAMRoleArn.ValueString()),
+    KmsKeyId:             aws.String(plan.KMSKeyID.ValueString()),
+    S3BucketName:         aws.String(plan.S3BucketName.ValueString()),
+    SourceArn:            aws.String(plan.SourceArn.ValueString()),
+  }
+  if !plan.ExportOnly.IsNull() {
+    in.ExportOnly = flex.ExpandFrameworkStringValueList(ctx, plan.ExportOnly)
+  }
+  if !plan.S3Prefix.IsNull() && !plan.S3Prefix.IsUnknown() {
+    in.S3Prefix = aws.String(plan.S3Prefix.ValueString())
+  }
+
+  out, err := conn.StartExportTask(ctx, &in)
+  if err != nil {
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionCreating, ResNameExportTask, plan.ExportTaskIdentifier.String(), nil),
+      err.Error(),
+    )
+    return
+  }
+  if out == nil {
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionCreating, ResNameExportTask, plan.ExportTaskIdentifier.String(), nil),
+      errors.New("empty output").Error(),
+    )
+    return
+  }
+
+  state := plan
+  state.refreshFromStartOutput(ctx, out)
+  resp.Diagnostics.Append(resp.State.Set(ctx, state)...)
+}
+
+func (r *resourceExportTask) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+  conn := r.Meta().RDSClient()
+
+  var state resourceExportTaskData
+  resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+  if resp.Diagnostics.HasError() {
+    return
+  }
+
+  out, err := FindExportTaskByID(ctx, conn, state.ID.ValueString())
+  if tfresource.NotFound(err) {
+    resp.Diagnostics.AddWarning(
+      "AWS Resource Not Found During Refresh",
+      fmt.Sprintf("Automatically removing from Terraform State instead of returning the error, which may trigger resource recreation. Original Error: %s", err.Error()),
+    )
+    resp.State.RemoveResource(ctx)
+    return
+  }
+  if err != nil {
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionReading, ResNameExportTask, state.ID.String(), nil),
+      err.Error(),
+    )
+    return
+  }
+
+  state.refreshFromDescribeOutput(ctx, out)
+  resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+}
+
+// There is no update API, so this method is a no-op
+func (r *resourceExportTask) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+}
+
+func (r *resourceExportTask) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+  conn := r.Meta().RDSClient()
+
+  var state resourceExportTaskData
+  resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+  if resp.Diagnostics.HasError() {
+    return
+  }
+
+  // Attempt to cancel the export task, but ignore errors where the task is in an invalid
+  // state (i.e. completed or failed) which cannot be canceled
+  _, err := conn.CancelExportTask(ctx, &rds.CancelExportTaskInput{
+    ExportTaskIdentifier: aws.String(state.ID.ValueString()),
+  })
+  if err != nil {
+    var stateFault *awstypes.InvalidExportTaskStateFault
+    if errors.As(err, &stateFault) {
+      return
+    }
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionDeleting, ResNameExportTask, state.ID.String(), nil),
+      err.Error(),
+    )
+  }
+}
+
+func (r *resourceExportTask) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+  resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+func FindExportTaskByID(ctx context.Context, conn *rds.Client, id string) (*awstypes.ExportTask, error) {
+  in := &rds.DescribeExportTasksInput{
+    ExportTaskIdentifier: aws.String(id),
+  }
+  out, err := conn.DescribeExportTasks(ctx, in)
+  // This API won't return a NotFound error if the identifier can't be found, just
+  // an empty result slice.
+  if err != nil {
+    return nil, err
+  }
+  if out == nil || len(out.ExportTasks) == 0 {
+    return nil, &sdkv2resource.NotFoundError{
+      LastRequest: in,
+    }
+  }
+  if len(out.ExportTasks) != 1 {
+    return nil, tfresource.NewTooManyResultsError(len(out.ExportTasks), in)
+  }
+
+  return &out.ExportTasks[0], nil
+}
+
+type resourceExportTaskData struct {
+  ExportOnly           types.List   `tfsdk:"export_only"`
+  ExportTaskIdentifier types.String `tfsdk:"export_task_identifier"`
+  FailureCause         types.String `tfsdk:"failure_cause"`
+  IAMRoleArn           types.String `tfsdk:"iam_role_arn"`
+  ID                   types.String `tfsdk:"id"`
+  KMSKeyID             types.String `tfsdk:"kms_key_id"`
+  PercentProgress      types.Int64  `tfsdk:"percent_progress"`
+  S3BucketName         types.String `tfsdk:"s3_bucket_name"`
+  S3Prefix             types.String `tfsdk:"s3_prefix"`
+  SnapshotTime         types.String `tfsdk:"snapshot_time"`
+  SourceArn            types.String `tfsdk:"source_arn"`
+  SourceType           types.String `tfsdk:"source_type"`
+  Status               types.String `tfsdk:"status"`
+  TaskEndTime          types.String `tfsdk:"task_end_time"`
+  TaskStartTime        types.String `tfsdk:"task_start_time"`
+  WarningMessage       types.String `tfsdk:"warning_message"`
+}
+
+// refreshFromOutput writes state data from an AWS response object
+//
+// This variant of the refresh method is for use with the start operation
+// response type (StartExportTaskOutput).
+func (rd *resourceExportTaskData) refreshFromStartOutput(ctx context.Context, out *rds.StartExportTaskOutput) {
+  if out == nil {
+    return
+  }
+
+  rd.ID = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
+  rd.ExportOnly = flex.FlattenFrameworkStringValueList(ctx, out.ExportOnly)
+  rd.ExportTaskIdentifier = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
+  rd.FailureCause = flex.StringToFramework(ctx, out.FailureCause)
+  rd.IAMRoleArn = flex.StringToFramework(ctx, out.IamRoleArn)
+  rd.KMSKeyID = flex.StringToFramework(ctx, out.KmsKeyId)
+  rd.PercentProgress = types.Int64Value(int64(out.PercentProgress))
+  rd.S3BucketName = flex.StringToFramework(ctx, out.S3Bucket)
+  rd.S3Prefix = flex.StringToFramework(ctx, out.S3Prefix)
+  rd.SnapshotTime = flex.StringValueToFramework(ctx, out.SnapshotTime.String())
+  rd.SourceArn = flex.StringToFramework(ctx, out.SourceArn)
+  rd.SourceType = flex.StringValueToFramework(ctx, out.SourceType)
+  rd.Status = flex.StringToFramework(ctx, out.Status)
+  rd.TaskEndTime = timeToFramework(ctx, out.TaskEndTime)
+  rd.TaskStartTime = timeToFramework(ctx, out.TaskStartTime)
+  rd.WarningMessage = flex.StringToFramework(ctx, out.WarningMessage)
+}
+
+// refreshFromOutput writes state data from an AWS response object
+//
+// This variant of the refresh method is for use with the describe operation
+// response type (ExportTask).
+func (rd *resourceExportTaskData) refreshFromDescribeOutput(ctx context.Context, out *awstypes.ExportTask) {
+  if out == nil {
+    return
+  }
+
+  rd.ID = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
+  rd.ExportOnly = flex.FlattenFrameworkStringValueList(ctx, out.ExportOnly)
+  rd.ExportTaskIdentifier = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
+  rd.FailureCause = flex.StringToFramework(ctx, out.FailureCause)
+  rd.IAMRoleArn = flex.StringToFramework(ctx, out.IamRoleArn)
+  rd.KMSKeyID = flex.StringToFramework(ctx, out.KmsKeyId)
+  rd.PercentProgress = types.Int64Value(int64(out.PercentProgress))
+  rd.S3BucketName = flex.StringToFramework(ctx, out.S3Bucket)
+  rd.S3Prefix = flex.StringToFramework(ctx, out.S3Prefix)
+  rd.SnapshotTime = flex.StringValueToFramework(ctx, out.SnapshotTime.String())
+  rd.SourceArn = flex.StringToFramework(ctx, out.SourceArn)
+  rd.SourceType = flex.StringValueToFramework(ctx, out.SourceType)
+  rd.Status = flex.StringToFramework(ctx, out.Status)
+  rd.TaskEndTime = timeToFramework(ctx, out.TaskEndTime)
+  rd.TaskStartTime = timeToFramework(ctx, out.TaskStartTime)
+  rd.WarningMessage = flex.StringToFramework(ctx, out.WarningMessage)
+}
+
+func timeToFramework(ctx context.Context, t *time.Time) basetypes.StringValue {
+  if t == nil {
+    return types.StringNull()
+  }
+  return flex.StringValueToFramework(ctx, t.String())
+}

From 057bc486cf82aaf2c1e38b316a5a7633528b5908 Mon Sep 17 00:00:00 2001
From: Jared Baker
Date: Wed, 11 Jan 2023 11:05:16 -0500
Subject: [PATCH 2/5] r/aws_rds_export_task: acc tests

---
 internal/service/rds/export_task_test.go | 285 +++++++++++++++++++++++
 1 file changed, 285 insertions(+)
 create mode 100644 internal/service/rds/export_task_test.go

diff --git a/internal/service/rds/export_task_test.go b/internal/service/rds/export_task_test.go
new file mode 100644
index 000000000000..68afa1298c75
--- /dev/null
+++ b/internal/service/rds/export_task_test.go
@@ -0,0 +1,285 @@
+package rds_test
+
+import (
+  "context"
+  "errors"
+  "fmt"
+  "testing"
+
+  "github.com/aws/aws-sdk-go-v2/aws"
+  "github.com/aws/aws-sdk-go-v2/service/rds/types"
+  rdsv1 "github.com/aws/aws-sdk-go/service/rds"
sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfrds "github.com/hashicorp/terraform-provider-aws/internal/service/rds" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRDSExportTask_basic(t *testing.T) { + var exportTask types.ExportTask + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_export_task.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(rdsv1.EndpointsID, t) + }, + ErrorCheck: acctest.ErrorCheck(t, rdsv1.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckExportTaskDestroy, + Steps: []resource.TestStep{ + { + Config: testAccExportTaskConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckExportTaskExists(resourceName, &exportTask), + resource.TestCheckResourceAttr(resourceName, "export_task_identifier", rName), + resource.TestCheckResourceAttr(resourceName, "id", rName), + resource.TestCheckResourceAttrPair(resourceName, "source_arn", "aws_db_snapshot.test", "db_snapshot_arn"), + resource.TestCheckResourceAttrPair(resourceName, "s3_bucket_name", "aws_s3_bucket.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "iam_role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccRDSExportTask_optional(t *testing.T) { + var exportTask types.ExportTask + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_export_task.test" + s3Prefix := "test_prefix/test-export" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(rdsv1.EndpointsID, t) + }, + ErrorCheck: acctest.ErrorCheck(t, rdsv1.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckExportTaskDestroy, + Steps: []resource.TestStep{ + { + Config: testAccExportTaskConfig_optional(rName, s3Prefix), + Check: resource.ComposeTestCheckFunc( + testAccCheckExportTaskExists(resourceName, &exportTask), + resource.TestCheckResourceAttr(resourceName, "export_task_identifier", rName), + resource.TestCheckResourceAttr(resourceName, "id", rName), + resource.TestCheckResourceAttrPair(resourceName, "source_arn", "aws_db_snapshot.test", "db_snapshot_arn"), + resource.TestCheckResourceAttrPair(resourceName, "s3_bucket_name", "aws_s3_bucket.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "iam_role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "export_only.#", "1"), + resource.TestCheckResourceAttr(resourceName, "export_only.0", "database"), + resource.TestCheckResourceAttr(resourceName, "s3_prefix", s3Prefix), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckExportTaskDestroy(s *terraform.State) error { + ctx 
+  ctx := context.Background()
+  conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient()
+
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "aws_rds_export_task" {
+      continue
+    }
+
+    out, err := tfrds.FindExportTaskByID(ctx, conn, rs.Primary.ID)
+    if err != nil {
+      var nfe *resource.NotFoundError
+      if errors.As(err, &nfe) {
+        return nil
+      }
+      return err
+    }
+    if !isInDestroyedStatus(aws.ToString(out.Status)) {
+      return create.Error(names.RDS, create.ErrActionCheckingDestroyed, tfrds.ResNameExportTask, rs.Primary.ID, errors.New("not destroyed"))
+    }
+  }
+
+  return nil
+}
+
+func testAccCheckExportTaskExists(name string, exportTask *types.ExportTask) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    rs, ok := s.RootModule().Resources[name]
+    if !ok {
+      return create.Error(names.RDS, create.ErrActionCheckingExistence, tfrds.ResNameExportTask, name, errors.New("not found"))
+    }
+
+    if rs.Primary.ID == "" {
+      return create.Error(names.RDS, create.ErrActionCheckingExistence, tfrds.ResNameExportTask, name, errors.New("not set"))
+    }
+
+    ctx := context.Background()
+    conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient()
+    resp, err := tfrds.FindExportTaskByID(ctx, conn, rs.Primary.ID)
+    if err != nil {
+      return create.Error(names.RDS, create.ErrActionCheckingExistence, tfrds.ResNameExportTask, rs.Primary.ID, err)
+    }
+
+    *exportTask = *resp
+
+    return nil
+  }
+}
+
+// isInDestroyedStatus determines whether the export task status is a value that could
+// be returned if the resource was properly destroyed.
+//
+// COMPLETED and FAILED statuses are valid because the resource is simply removed from
+// state in these scenarios. In-progress tasks should be cancelled upon destroy, so CANCELED
+// and CANCELLING are also valid.
+func isInDestroyedStatus(s string) bool {
+  // AWS does not provide enum types for these statuses
+  deletedStatuses := []string{"CANCELED", "CANCELLING", "COMPLETED", "FAILED"}
+  for _, status := range deletedStatuses {
+    if s == status {
+      return true
+    }
+  }
+  return false
+}
+
+func testAccExportTaskConfigBase(rName string) string {
+  return fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  bucket        = %[1]q
+  force_destroy = true
+}
+
+resource "aws_s3_bucket_acl" "test" {
+  bucket = aws_s3_bucket.test.id
+  acl    = "private"
+}
+
+resource "aws_iam_role" "test" {
+  name = %[1]q
+
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Sid    = ""
+        Principal = {
+          Service = "export.rds.amazonaws.com"
+        }
+      },
+    ]
+  })
+}
+
+data "aws_iam_policy_document" "test" {
+  statement {
+    actions = [
+      "s3:ListAllMyBuckets",
+    ]
+    resources = [
+      "*"
+    ]
+  }
+  statement {
+    actions = [
+      "s3:GetBucketLocation",
+      "s3:ListBucket",
+    ]
+    resources = [
+      aws_s3_bucket.test.arn,
+    ]
+  }
+  statement {
+    actions = [
+      "s3:GetObject",
+      "s3:PutObject",
+      "s3:DeleteObject",
+    ]
+    resources = [
+      "${aws_s3_bucket.test.arn}/*"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "test" {
+  name   = %[1]q
+  policy = data.aws_iam_policy_document.test.json
+}
+
+resource "aws_iam_role_policy_attachment" "test-attach" {
+  role       = aws_iam_role.test.name
+  policy_arn = aws_iam_policy.test.arn
+}
+
+resource "aws_kms_key" "test" {
+  deletion_window_in_days = 10
+}
+
+resource "aws_db_instance" "test" {
+  identifier           = %[1]q
+  allocated_storage    = 10
+  db_name              = "test"
+  engine               = "mysql"
+  engine_version       = "5.7"
+  instance_class       = "db.t3.micro"
+  username             = "foo"
+  password             = "foobarbaz"
+  parameter_group_name = "default.mysql5.7"
+  skip_final_snapshot  = true
+}
+
+resource "aws_db_snapshot" "test" {
+  db_instance_identifier = aws_db_instance.test.id
+  db_snapshot_identifier = %[1]q
+}
+`, rName)
+}
+
+func testAccExportTaskConfig_basic(rName string) string {
+  return acctest.ConfigCompose(
+    testAccExportTaskConfigBase(rName),
+    fmt.Sprintf(`
+resource "aws_rds_export_task" "test" {
+  export_task_identifier = %[1]q
+  source_arn             = aws_db_snapshot.test.db_snapshot_arn
+  s3_bucket_name         = aws_s3_bucket.test.id
+  iam_role_arn           = aws_iam_role.test.arn
+  kms_key_id             = aws_kms_key.test.arn
+}
+`, rName))
+}
+
+func testAccExportTaskConfig_optional(rName, s3Prefix string) string {
+  return acctest.ConfigCompose(
+    testAccExportTaskConfigBase(rName),
+    fmt.Sprintf(`
+resource "aws_rds_export_task" "test" {
+  export_task_identifier = %[1]q
+  source_arn             = aws_db_snapshot.test.db_snapshot_arn
+  s3_bucket_name         = aws_s3_bucket.test.id
+  iam_role_arn           = aws_iam_role.test.arn
+  kms_key_id             = aws_kms_key.test.arn
+
+  export_only = ["database"]
+  s3_prefix   = %[2]q
+}
+`, rName, s3Prefix))
+}

From dc2a92c70110f0ef1bd8721d9a53d00561c59c17 Mon Sep 17 00:00:00 2001
From: Jared Baker
Date: Wed, 11 Jan 2023 13:42:01 -0500
Subject: [PATCH 3/5] r/aws_rds_export_task: docs

---
 website/docs/r/rds_export_task.html.markdown | 167 +++++++++++++++++++
 1 file changed, 167 insertions(+)
 create mode 100644 website/docs/r/rds_export_task.html.markdown

diff --git a/website/docs/r/rds_export_task.html.markdown b/website/docs/r/rds_export_task.html.markdown
new file mode 100644
index 000000000000..15e2bf0b7c79
--- /dev/null
+++ b/website/docs/r/rds_export_task.html.markdown
@@ -0,0 +1,167 @@
+---
+subcategory: "RDS (Relational Database)"
+layout: "aws"
+page_title: "AWS: aws_rds_export_task"
+description: |-
+  Terraform resource for managing an AWS RDS (Relational Database) Export Task.
+---
+
+# Resource: aws_rds_export_task
+
+Terraform resource for managing an AWS RDS (Relational Database) Export Task.
+
+## Example Usage
+
+### Basic Usage
+
+```terraform
+resource "aws_rds_export_task" "example" {
+  export_task_identifier = "example"
+  source_arn             = aws_db_snapshot.example.db_snapshot_arn
+  s3_bucket_name         = aws_s3_bucket.example.id
+  iam_role_arn           = aws_iam_role.example.arn
+  kms_key_id             = aws_kms_key.example.arn
+}
+```
+
+### Complete Usage
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket        = "example"
+  force_destroy = true
+}
+
+resource "aws_s3_bucket_acl" "example" {
+  bucket = aws_s3_bucket.example.id
+  acl    = "private"
+}
+
+resource "aws_iam_role" "example" {
+  name = "example"
+
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Sid    = ""
+        Principal = {
+          Service = "export.rds.amazonaws.com"
+        }
+      },
+    ]
+  })
+}
+
+data "aws_iam_policy_document" "example" {
+  statement {
+    actions = [
+      "s3:ListAllMyBuckets",
+    ]
+    resources = [
+      "*"
+    ]
+  }
+  statement {
+    actions = [
+      "s3:GetBucketLocation",
+      "s3:ListBucket",
+    ]
+    resources = [
+      aws_s3_bucket.example.arn,
+    ]
+  }
+  statement {
+    actions = [
+      "s3:GetObject",
+      "s3:PutObject",
+      "s3:DeleteObject",
+    ]
+    resources = [
+      "${aws_s3_bucket.example.arn}/*"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "example" {
+  name   = "example"
+  policy = data.aws_iam_policy_document.example.json
+}
+
+resource "aws_iam_role_policy_attachment" "example" {
+  role       = aws_iam_role.example.name
+  policy_arn = aws_iam_policy.example.arn
+}
+
+resource "aws_kms_key" "example" {
+  deletion_window_in_days = 10
+}
+
+resource "aws_db_instance" "example" {
+  identifier           = "example"
+  allocated_storage    = 10
+  db_name              = "test"
+  engine               = "mysql"
+  engine_version       = "5.7"
+  instance_class       = "db.t3.micro"
+  username             = "foo"
+  password             = "foobarbaz"
+  parameter_group_name = "default.mysql5.7"
+  skip_final_snapshot  = true
+}
+
+resource "aws_db_snapshot" "example" {
+  db_instance_identifier = aws_db_instance.example.id
+  db_snapshot_identifier = "example"
+}
+
+resource "aws_rds_export_task" "example" {
+  export_task_identifier = "example"
+  source_arn             = aws_db_snapshot.example.db_snapshot_arn
+  s3_bucket_name         = aws_s3_bucket.example.id
+  iam_role_arn           = aws_iam_role.example.arn
+  kms_key_id             = aws_kms_key.example.arn
+
+  export_only = ["database"]
+  s3_prefix   = "my_prefix/example"
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `export_task_identifier` - (Required) Unique identifier for the snapshot export task.
+* `iam_role_arn` - (Required) ARN of the IAM role to use for writing to the Amazon S3 bucket.
+* `kms_key_id` - (Required) ID of the Amazon Web Services KMS key to use to encrypt the snapshot.
+* `s3_bucket_name` - (Required) Name of the Amazon S3 bucket to export the snapshot to.
+* `source_arn` - (Required) Amazon Resource Name (ARN) of the snapshot to export.
+
+The following arguments are optional:
+
+* `export_only` - (Optional) Data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are documented in the [AWS StartExportTask API documentation](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartExportTask.html#API_StartExportTask_RequestParameters).
+* `s3_prefix` - (Optional) Amazon S3 bucket prefix to use as the file name and path of the exported snapshot.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `failure_cause` - Reason the export failed, if it failed.
+* `id` - Unique identifier for the snapshot export task (same value as `export_task_identifier`).
+* `percent_progress` - Progress of the snapshot export task as a percentage.
+* `snapshot_time` - Time that the snapshot was created.
+* `source_type` - Type of source for the export.
+* `status` - Status of the export task.
+* `task_end_time` - Time that the snapshot export task completed.
+* `task_start_time` - Time that the snapshot export task started.
+* `warning_message` - Warning about the snapshot export task, if any.
+
+## Import
+
+An RDS (Relational Database) Export Task can be imported using the `export_task_identifier`, e.g.,
+
+```
+$ terraform import aws_rds_export_task.example example
+```

From 7595e001fe99c637c1cb9fa15ade78465d44bda0 Mon Sep 17 00:00:00 2001
From: Jared Baker
Date: Wed, 11 Jan 2023 14:33:07 -0500
Subject: [PATCH 4/5] chore: changelog

---
 .changelog/28831.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/28831.txt

diff --git a/.changelog/28831.txt b/.changelog/28831.txt
new file mode 100644
index 000000000000..25d697c57e66
--- /dev/null
+++ b/.changelog/28831.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+aws_rds_export_task
+```

From 0647843ea9d88e1ed8035e5c70199be50424a485 Mon Sep 17 00:00:00 2001
From: Jared Baker
Date: Thu, 19 Jan 2023 11:31:26 -0500
Subject: [PATCH 5/5] r/aws_rds_export_task: add waiters

---
 internal/service/rds/export_task.go      | 159 ++++++++++++++++-------
 internal/service/rds/export_task_test.go |  11 +-
 2 files changed, 116 insertions(+), 54 deletions(-)

diff --git a/internal/service/rds/export_task.go b/internal/service/rds/export_task.go
index 44875512af14..1f51d8947938 100644
--- a/internal/service/rds/export_task.go
+++ b/internal/service/rds/export_task.go
@@ -9,6 +9,7 @@ import (
   "github.com/aws/aws-sdk-go-v2/aws"
   "github.com/aws/aws-sdk-go-v2/service/rds"
   awstypes "github.com/aws/aws-sdk-go-v2/service/rds/types"
+  "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
   "github.com/hashicorp/terraform-plugin-framework/path"
   "github.com/hashicorp/terraform-plugin-framework/resource"
   "github.com/hashicorp/terraform-plugin-framework/resource/schema"
@@ -30,15 +31,28 @@ func init() {
 }
 
 func newResourceExportTask(_ context.Context) (resource.ResourceWithConfigure, error) {
-  return &resourceExportTask{}, nil
+  r := &resourceExportTask{}
+  r.SetDefaultCreateTimeout(60 * time.Minute)
+  r.SetDefaultDeleteTimeout(20 * time.Minute)
+
+  return r, nil
 }
 
 const (
   ResNameExportTask = "ExportTask"
+
+  // Use string constants as the RDS package does not provide status enums
+  StatusCanceled   = "CANCELED"
+  StatusCanceling  = "CANCELING"
+  StatusComplete   = "COMPLETE"
+  StatusFailed     = "FAILED"
+  StatusInProgress = "IN_PROGRESS"
+  StatusStarting   = "STARTING"
 )
 
 type resourceExportTask struct {
   framework.ResourceWithConfigure
+  framework.WithTimeouts
 }
 
 func (r *resourceExportTask) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
@@ -119,6 +133,12 @@ func (r *resourceExportTask) Schema(ctx context.Context, req resource.SchemaRequ
         Computed: true,
       },
     },
+    Blocks: map[string]schema.Block{
+      "timeouts": timeouts.Block(ctx, timeouts.Opts{
+        Create: true,
+        Delete: true,
+      }),
+    },
   }
 }
 
@@ -145,7 +165,7 @@ func (r *resourceExportTask) Create(ctx context.Context, req resource.CreateRequ
     in.S3Prefix = aws.String(plan.S3Prefix.ValueString())
   }
 
-  out, err := conn.StartExportTask(ctx, &in)
+  outStart, err := conn.StartExportTask(ctx, &in)
   if err != nil {
     resp.Diagnostics.AddError(
       create.ProblemStandardMessage(names.RDS, create.ErrActionCreating, ResNameExportTask, plan.ExportTaskIdentifier.String(), nil),
@@ -153,7 +173,7 @@ func (r *resourceExportTask) Create(ctx context.Context, req resource.CreateRequ
     )
     return
   }
-  if out == nil {
+  if outStart == nil {
     resp.Diagnostics.AddError(
       create.ProblemStandardMessage(names.RDS, create.ErrActionCreating, ResNameExportTask, plan.ExportTaskIdentifier.String(), nil),
       errors.New("empty output").Error(),
@@ -161,8 +181,18 @@ func (r *resourceExportTask) Create(ctx context.Context, req resource.CreateRequ
     return
   }
 
+  createTimeout := r.CreateTimeout(ctx, plan.Timeouts)
+  out, err := waitExportTaskCreated(ctx, conn, plan.ExportTaskIdentifier.ValueString(), createTimeout)
+  if err != nil {
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionCreating, ResNameExportTask, plan.ExportTaskIdentifier.String(), nil),
+      err.Error(),
+    )
+    return
+  }
+
   state := plan
-  state.refreshFromStartOutput(ctx, out)
+  state.refreshFromOutput(ctx, out)
   resp.Diagnostics.Append(resp.State.Set(ctx, state)...)
 }
 
@@ -192,7 +222,7 @@ func (r *resourceExportTask) Read(ctx context.Context, req resource.ReadRequest,
     return
   }
 
-  state.refreshFromDescribeOutput(ctx, out)
+  state.refreshFromOutput(ctx, out)
   resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
 }
 
@@ -224,6 +254,15 @@ func (r *resourceExportTask) Delete(ctx context.Context, req resource.DeleteRequ
       err.Error(),
     )
   }
+
+  deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts)
+  _, err = waitExportTaskDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout)
+  if err != nil {
+    resp.Diagnostics.AddError(
+      create.ProblemStandardMessage(names.RDS, create.ErrActionDeleting, ResNameExportTask, state.ID.String(), nil),
+      err.Error(),
+    )
+  }
 }
 
 func (r *resourceExportTask) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
@@ -252,57 +291,77 @@ func FindExportTaskByID(ctx context.Context, conn *rds.Client, id string) (*awst
   return &out.ExportTasks[0], nil
 }
 
-type resourceExportTaskData struct {
-  ExportOnly           types.List   `tfsdk:"export_only"`
-  ExportTaskIdentifier types.String `tfsdk:"export_task_identifier"`
-  FailureCause         types.String `tfsdk:"failure_cause"`
-  IAMRoleArn           types.String `tfsdk:"iam_role_arn"`
-  ID                   types.String `tfsdk:"id"`
-  KMSKeyID             types.String `tfsdk:"kms_key_id"`
-  PercentProgress      types.Int64  `tfsdk:"percent_progress"`
-  S3BucketName         types.String `tfsdk:"s3_bucket_name"`
-  S3Prefix             types.String `tfsdk:"s3_prefix"`
-  SnapshotTime         types.String `tfsdk:"snapshot_time"`
-  SourceArn            types.String `tfsdk:"source_arn"`
-  SourceType           types.String `tfsdk:"source_type"`
-  Status               types.String `tfsdk:"status"`
-  TaskEndTime          types.String `tfsdk:"task_end_time"`
-  TaskStartTime        types.String `tfsdk:"task_start_time"`
-  WarningMessage       types.String `tfsdk:"warning_message"`
+func statusExportTask(ctx context.Context, conn *rds.Client, id string) sdkv2resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    out, err := FindExportTaskByID(ctx, conn, id)
+    if tfresource.NotFound(err) {
+      return nil, "", nil
+    }
+
+    if err != nil {
+      return nil, "", err
+    }
+
+    return out, aws.ToString(out.Status), nil
+  }
 }
 
-// refreshFromOutput writes state data from an AWS response object
-//
-// This variant of the refresh method is for use with the start operation
-// response type (StartExportTaskOutput).
-func (rd *resourceExportTaskData) refreshFromStartOutput(ctx context.Context, out *rds.StartExportTaskOutput) {
-  if out == nil {
-    return
+func waitExportTaskCreated(ctx context.Context, conn *rds.Client, id string, timeout time.Duration) (*awstypes.ExportTask, error) {
+  stateConf := &sdkv2resource.StateChangeConf{
+    Pending:    []string{StatusStarting, StatusInProgress},
+    Target:     []string{StatusComplete, StatusFailed},
+    Refresh:    statusExportTask(ctx, conn, id),
+    Timeout:    timeout,
+    MinTimeout: 10 * time.Second,
+    Delay:      30 * time.Second,
   }
-  rd.ID = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
-  rd.ExportOnly = flex.FlattenFrameworkStringValueList(ctx, out.ExportOnly)
-  rd.ExportTaskIdentifier = flex.StringToFramework(ctx, out.ExportTaskIdentifier)
-  rd.FailureCause = flex.StringToFramework(ctx, out.FailureCause)
-  rd.IAMRoleArn = flex.StringToFramework(ctx, out.IamRoleArn)
-  rd.KMSKeyID = flex.StringToFramework(ctx, out.KmsKeyId)
-  rd.PercentProgress = types.Int64Value(int64(out.PercentProgress))
-  rd.S3BucketName = flex.StringToFramework(ctx, out.S3Bucket)
-  rd.S3Prefix = flex.StringToFramework(ctx, out.S3Prefix)
-  rd.SnapshotTime = flex.StringValueToFramework(ctx, out.SnapshotTime.String())
-  rd.SourceArn = flex.StringToFramework(ctx, out.SourceArn)
-  rd.SourceType = flex.StringValueToFramework(ctx, out.SourceType)
-  rd.Status = flex.StringToFramework(ctx, out.Status)
-  rd.TaskEndTime = timeToFramework(ctx, out.TaskEndTime)
-  rd.TaskStartTime = timeToFramework(ctx, out.TaskStartTime)
-  rd.WarningMessage = flex.StringToFramework(ctx, out.WarningMessage)
+  outputRaw, err := stateConf.WaitForStateContext(ctx)
+  if out, ok := outputRaw.(*awstypes.ExportTask); ok {
+    return out, err
+  }
+
+  return nil, err
+}
+
+func waitExportTaskDeleted(ctx context.Context, conn *rds.Client, id string, timeout time.Duration) (*awstypes.ExportTask, error) {
+  stateConf := &sdkv2resource.StateChangeConf{
+    Pending: []string{StatusStarting, StatusInProgress, StatusCanceling},
+    Target:  []string{},
+    Refresh: statusExportTask(ctx, conn, id),
+    Timeout: timeout,
+  }
+
+  outputRaw, err := stateConf.WaitForStateContext(ctx)
+  if out, ok := outputRaw.(*awstypes.ExportTask); ok {
+    return out, err
+  }
+
+  return nil, err
+}
+
+type resourceExportTaskData struct {
+  ExportOnly           types.List     `tfsdk:"export_only"`
+  ExportTaskIdentifier types.String   `tfsdk:"export_task_identifier"`
+  FailureCause         types.String   `tfsdk:"failure_cause"`
+  IAMRoleArn           types.String   `tfsdk:"iam_role_arn"`
+  ID                   types.String   `tfsdk:"id"`
+  KMSKeyID             types.String   `tfsdk:"kms_key_id"`
+  PercentProgress      types.Int64    `tfsdk:"percent_progress"`
+  S3BucketName         types.String   `tfsdk:"s3_bucket_name"`
+  S3Prefix             types.String   `tfsdk:"s3_prefix"`
+  SnapshotTime         types.String   `tfsdk:"snapshot_time"`
+  SourceArn            types.String   `tfsdk:"source_arn"`
+  SourceType           types.String   `tfsdk:"source_type"`
+  Status               types.String   `tfsdk:"status"`
+  TaskEndTime          types.String   `tfsdk:"task_end_time"`
+  TaskStartTime        types.String   `tfsdk:"task_start_time"`
+  Timeouts             timeouts.Value `tfsdk:"timeouts"`
+  WarningMessage       types.String   `tfsdk:"warning_message"`
 }
 
 // refreshFromOutput writes state data from an AWS response object
-//
-// This variant of the refresh method is for use with the describe operation
-// response type (ExportTask).
-func (rd *resourceExportTaskData) refreshFromDescribeOutput(ctx context.Context, out *awstypes.ExportTask) {
+func (rd *resourceExportTaskData) refreshFromOutput(ctx context.Context, out *awstypes.ExportTask) {
   if out == nil {
     return
   }
@@ -316,7 +375,7 @@ func (rd *resourceExportTaskData) refreshFromDescribeOutput(ctx context.Context,
   rd.PercentProgress = types.Int64Value(int64(out.PercentProgress))
   rd.S3BucketName = flex.StringToFramework(ctx, out.S3Bucket)
   rd.S3Prefix = flex.StringToFramework(ctx, out.S3Prefix)
-  rd.SnapshotTime = flex.StringValueToFramework(ctx, out.SnapshotTime.String())
+  rd.SnapshotTime = timeToFramework(ctx, out.SnapshotTime)
   rd.SourceArn = flex.StringToFramework(ctx, out.SourceArn)
   rd.SourceType = flex.StringValueToFramework(ctx, out.SourceType)
   rd.Status = flex.StringToFramework(ctx, out.Status)
diff --git a/internal/service/rds/export_task_test.go b/internal/service/rds/export_task_test.go
index 68afa1298c75..66c61c395d86 100644
--- a/internal/service/rds/export_task_test.go
+++ b/internal/service/rds/export_task_test.go
@@ -145,12 +145,15 @@ func testAccCheckExportTaskExists(name string, exportTask *types.ExportTask) res
 // isInDestroyedStatus determines whether the export task status is a value that could
 // be returned if the resource was properly destroyed.
 //
-// COMPLETED and FAILED statuses are valid because the resource is simply removed from
+// COMPLETE and FAILED statuses are valid because the resource is simply removed from
 // state in these scenarios. In-progress tasks should be cancelled upon destroy, so CANCELED
-// and CANCELLING are also valid.
+// is also valid.
 func isInDestroyedStatus(s string) bool {
-  // AWS does not provide enum types for these statuses
-  deletedStatuses := []string{"CANCELED", "CANCELLING", "COMPLETED", "FAILED"}
+  deletedStatuses := []string{
+    tfrds.StatusComplete,
+    tfrds.StatusFailed,
+    tfrds.StatusCanceled,
+  }
   for _, status := range deletedStatuses {
     if s == status {
       return true