Skip to content

Commit

Permalink
Merge pull request #14420 from terraform-providers/b-rds-cluster-destroy
Browse files Browse the repository at this point in the history
resource/rds_cluster: update delete timeout and add additional retry condition
  • Loading branch information
anGie44 authored Jul 31, 2020
2 parents 6c8733e + 54497cc commit cc040d1
Show file tree
Hide file tree
Showing 2 changed files with 130 additions and 1 deletion.
6 changes: 5 additions & 1 deletion aws/resource_aws_rds_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (
// Defaults and timeouts used by the aws_rds_cluster resource.
const (
// Aurora Serverless scaling_configuration defaults applied when the
// practitioner omits min/max capacity in their configuration.
rdsClusterScalingConfiguration_DefaultMinCapacity = 1
rdsClusterScalingConfiguration_DefaultMaxCapacity = 16
// Retry window for DeleteDBCluster; must be long enough to outlast the
// transient "not available" / "part of a global cluster" states that can
// occur while a secondary cluster is being detached from a global cluster.
rdsClusterTimeoutDelete = 2 * time.Minute
)

func resourceAwsRDSCluster() *schema.Resource {
Expand Down Expand Up @@ -1283,12 +1284,15 @@ func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error

log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)

err := resource.Retry(1*time.Minute, func() *resource.RetryError {
err := resource.Retry(rdsClusterTimeoutDelete, func() *resource.RetryError {
_, err := conn.DeleteDBCluster(&deleteOpts)
if err != nil {
if isAWSErr(err, rds.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") {
return resource.RetryableError(err)
}
if isAWSErr(err, rds.ErrCodeInvalidDBClusterStateFault, "cluster is a part of a global cluster") {
return resource.RetryableError(err)
}
if isAWSErr(err, rds.ErrCodeDBClusterNotFoundFault, "") {
return nil
}
Expand Down
125 changes: 125 additions & 0 deletions aws/resource_aws_rds_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1264,6 +1264,34 @@ func TestAccAWSRDSCluster_GlobalClusterIdentifier_EngineMode_Provisioned(t *test
})
}

// Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/13126
// Verifies that a primary and a secondary RDS cluster attached to the same
// Aurora global cluster can be created (and destroyed) across two regions.
func TestAccAWSRDSCluster_GlobalClusterIdentifier_PrimarySecondaryClusters(t *testing.T) {
	var provs []*schema.Provider
	var clusterPrimary, clusterSecondary rds.DBCluster

	// Distinct random names for the global cluster and its two members.
	globalName := acctest.RandomWithPrefix("tf-acc-test-global")
	primaryName := acctest.RandomWithPrefix("tf-acc-test-primary")
	secondaryName := acctest.RandomWithPrefix("tf-acc-test-secondary")

	const (
		primaryResource   = "aws_rds_cluster.primary"
		secondaryResource = "aws_rds_cluster.secondary"
	)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { testAccPreCheck(t) },
		ProviderFactories: testAccProviderFactories(&provs),
		CheckDestroy:      testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSRDSClusterConfig_GlobalClusterIdentifier_PrimarySecondaryClusters(globalName, primaryName, secondaryName),
				Check: resource.ComposeTestCheckFunc(
					// Each cluster is checked against the provider for its own region.
					testAccCheckAWSClusterExistsWithProvider(primaryResource, &clusterPrimary, testAccAwsRegionProviderFunc(testAccGetRegion(), &provs)),
					testAccCheckAWSClusterExistsWithProvider(secondaryResource, &clusterSecondary, testAccAwsRegionProviderFunc(testAccGetAlternateRegion(), &provs)),
				),
			},
		},
	})
}

func TestAccAWSRDSCluster_Port(t *testing.T) {
var dbCluster1, dbCluster2 rds.DBCluster
rInt := acctest.RandInt()
Expand Down Expand Up @@ -3137,6 +3165,103 @@ resource "aws_rds_cluster" "test" {
`, rName)
}

// testAccAWSRDSClusterConfig_GlobalClusterIdentifier_PrimarySecondaryClusters
// renders the Terraform configuration for an Aurora global database spanning
// two regions: a primary cluster/instance in the default test region and a
// secondary cluster/instance (with its own VPC, subnets, and subnet group) in
// the alternate region, both attached to one aws_rds_global_cluster.
//
// Sprintf verb mapping: %[1]s = rNameGlobal, %[2]s = rNamePrimary,
// %[3]s = rNameSecondary. The HCL below is a raw string literal and must not
// be edited for cosmetic reasons — its exact bytes are the test fixture.
func testAccAWSRDSClusterConfig_GlobalClusterIdentifier_PrimarySecondaryClusters(rNameGlobal, rNamePrimary, rNameSecondary string) string {
// composeConfig prepends the two-region provider blocks required by the
// "awsalternate" provider references in the HCL.
return composeConfig(
testAccMultipleRegionProviderConfig(2),
fmt.Sprintf(`
data "aws_region" "current" {}
data "aws_availability_zones" "alternate" {
provider = "awsalternate"
state = "available"
filter {
name = "opt-in-status"
values = ["opt-in-not-required"]
}
}
resource "aws_rds_global_cluster" "test" {
global_cluster_identifier = "%[1]s"
engine = "aurora-mysql"
engine_version = "5.7.mysql_aurora.2.07.1"
}
resource "aws_rds_cluster" "primary" {
cluster_identifier = "%[2]s"
database_name = "mydb"
master_username = "foo"
master_password = "barbarbar"
skip_final_snapshot = true
global_cluster_identifier = aws_rds_global_cluster.test.id
engine = aws_rds_global_cluster.test.engine
engine_version = aws_rds_global_cluster.test.engine_version
}
resource "aws_rds_cluster_instance" "primary" {
identifier = "%[2]s"
cluster_identifier = aws_rds_cluster.primary.id
instance_class = "db.r4.large" # only db.r4 or db.r5 are valid for Aurora global db
engine = aws_rds_cluster.primary.engine
engine_version = aws_rds_cluster.primary.engine_version
}
resource "aws_vpc" "alternate" {
provider = "awsalternate"
cidr_block = "10.0.0.0/16"
tags = {
Name = "%[3]s"
}
}
resource "aws_subnet" "alternate" {
provider = "awsalternate"
count = 3
vpc_id = aws_vpc.alternate.id
availability_zone = data.aws_availability_zones.alternate.names[count.index]
cidr_block = "10.0.${count.index}.0/24"
tags = {
Name = "%[3]s"
}
}
resource "aws_db_subnet_group" "alternate" {
provider = "awsalternate"
name = "%[3]s"
subnet_ids = aws_subnet.alternate[*].id
}
resource "aws_rds_cluster" "secondary" {
provider = "awsalternate"
cluster_identifier = "%[3]s"
db_subnet_group_name = aws_db_subnet_group.alternate.name
skip_final_snapshot = true
source_region = data.aws_region.current.name
global_cluster_identifier = aws_rds_global_cluster.test.id
engine = aws_rds_global_cluster.test.engine
engine_version = aws_rds_global_cluster.test.engine_version
depends_on = [aws_rds_cluster_instance.primary]
lifecycle {
ignore_changes = [
replication_source_identifier,
]
}
}
resource "aws_rds_cluster_instance" "secondary" {
provider = "awsalternate"
identifier = "%[3]s"
cluster_identifier = aws_rds_cluster.secondary.id
instance_class = "db.r4.large" # only db.r4 or db.r5 are valid for Aurora global db
engine = aws_rds_cluster.secondary.engine
engine_version = aws_rds_cluster.secondary.engine_version
}
`, rNameGlobal, rNamePrimary, rNameSecondary))
}

func testAccAWSRDSClusterConfig_ScalingConfiguration(rName string, autoPause bool, maxCapacity, minCapacity, secondsUntilAutoPause int, timeoutAction string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
Expand Down

0 comments on commit cc040d1

Please sign in to comment.