dynamodb/table_replica: Fix creation error when KMS #29102

Merged · 17 commits · Jan 27, 2023
39 changes: 39 additions & 0 deletions .changelog/29102.txt
@@ -0,0 +1,39 @@
```release-note:bug
resource/aws_dynamodb_table: Fix to allow updating of `replica.*.point_in_time_recovery` when a `replica` has `kms_key_arn` set
```

```release-note:bug
resource/aws_dynamodb_table: Fix to allow updating of `replica.*.kms_key_arn`
```

```release-note:bug
resource/aws_dynamodb_table: Fix perpetual diffs when using default AWS-managed keys
```

```release-note:note
resource/aws_dynamodb_table: When a `replica` has `kms_key_arn` set, updating `replica.*.kms_key_arn` or `replica.*.point_in_time_recovery` requires recreating the replica.
```

```release-note:note
resource/aws_dynamodb_table_replica: Updating `kms_key_arn` now forces replacement of the replica, as required to re-encrypt it
```

```release-note:note
resource/aws_dynamodb_table: In the past, in certain situations, `server_side_encryption.0.kms_key_arn` or `replica.*.kms_key_arn` could be populated with the default DynamoDB key `alias/aws/dynamodb`. This was an error because the ARN would then be sent back to AWS when it should not be.
```

```release-note:note
resource/aws_dynamodb_table_replica: In the past, in certain situations, `kms_key_arn` could be populated with the default DynamoDB key `alias/aws/dynamodb`. This was an error because the ARN would then be sent back to AWS when it should not be.
```

```release-note:bug
resource/aws_dynamodb_table_replica: Fix to allow updating of `kms_key_arn`
```

```release-note:bug
resource/aws_dynamodb_table_replica: Fix to allow creation of the replica without errors when `kms_key_arn` is set
```
113 changes: 87 additions & 26 deletions internal/service/dynamodb/table.go
@@ -22,6 +22,7 @@ import (
"github.com/hashicorp/terraform-provider-aws/internal/create"
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
"github.com/hashicorp/terraform-provider-aws/internal/flex"
"github.com/hashicorp/terraform-provider-aws/internal/service/kms"
tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -51,7 +52,6 @@ func ResourceTable() *schema.Resource {
Update: schema.DefaultTimeout(updateTableTimeoutTotal),
},

//TODO: Add a custom diff if it is just the kms keys changing maybe check the replica update function?
CustomizeDiff: customdiff.All(
func(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error {
return validStreamSpec(diff)
@@ -265,6 +265,7 @@ func ResourceTable() *schema.Resource {
Optional: true,
Computed: true,
ValidateFunc: verify.ValidARN,
// an update forces a new *replica*, not a new table
},
"point_in_time_recovery": {
Type: schema.TypeBool,
@@ -279,6 +280,7 @@
"region_name": {
Type: schema.TypeString,
Required: true,
// an update forces a new *replica*, not a new table
},
},
},
@@ -651,7 +653,10 @@ func resourceTableRead(ctx context.Context, d *schema.ResourceData, meta interfa
d.Set("stream_arn", table.LatestStreamArn)
d.Set("stream_label", table.LatestStreamLabel)

if err := d.Set("server_side_encryption", flattenTableServerSideEncryption(table.SSEDescription)); err != nil {
sse := flattenTableServerSideEncryption(table.SSEDescription)
sse = clearSSEDefaultKey(ctx, sse, meta)

if err := d.Set("server_side_encryption", sse); err != nil {
return create.DiagSettingError(names.DynamoDB, ResNameTable, d.Id(), "server_side_encryption", err)
}

@@ -662,6 +667,7 @@ func resourceTableRead(ctx context.Context, d *schema.ResourceData, meta interfa
}

replicas = addReplicaTagPropagates(d.Get("replica").(*schema.Set), replicas)
replicas = clearReplicaDefaultKeys(ctx, replicas, meta)

if err := d.Set("replica", replicas); err != nil {
return create.DiagSettingError(names.DynamoDB, ResNameTable, d.Id(), "replica", err)
@@ -1123,6 +1129,9 @@ func createReplicas(ctx context.Context, conn *dynamodb.DynamoDB, tableName stri
if tfawserr.ErrMessageContains(err, dynamodb.ErrCodeLimitExceededException, "can be created, updated, or deleted simultaneously") {
return resource.RetryableError(err)
}
if tfawserr.ErrMessageContains(err, "ValidationException", "Replica specified in the Replica Update or Replica Delete action of the request was not found") {
return resource.RetryableError(err)
}
if tfawserr.ErrCodeEquals(err, dynamodb.ErrCodeResourceInUseException) {
return resource.RetryableError(err)
}
@@ -1273,29 +1282,41 @@ func updateReplica(ctx context.Context, d *schema.ResourceData, conn *dynamodb.D
oRaw, nRaw := d.GetChange("replica")
o := oRaw.(*schema.Set)
n := nRaw.(*schema.Set)
newRegions := replicaRegions(n)
oldRegions := replicaRegions(o)
// If only a change in KMS keys is causing the diff, no updates are needed
if reflect.DeepEqual(oldRegions, newRegions) {
return nil
}

removed := o.Difference(n).List()
added := n.Difference(o).List()

// 1. Changing a replica's KMS key requires recreating the replica, like ForceNew, but we don't want to ForceNew the *table*
// 2. Also, updating PITR on an encrypted replica (one with a KMS key) requires recreation (you couldn't restore from a backup encrypted with a different key)

var removeFirst []interface{} // replicas to delete before recreating (like ForceNew without recreating the table)

// For true updates, don't remove and add, just update (i.e., keep in added
// but remove from removed)
for _, a := range added {
for j, r := range removed {
ma := a.(map[string]interface{})
mr := r.(map[string]interface{})

if ma["region_name"].(string) == mr["region_name"].(string) && (ma["kms_key_arn"].(string) != "" || mr["kms_key_arn"].(string) != "") {
removeFirst = append(removeFirst, removed[j])
removed = append(removed[:j], removed[j+1:]...)
continue
}

if ma["region_name"].(string) == mr["region_name"].(string) {
removed = append(removed[:j], removed[j+1:]...)
continue
}
}
}

if len(removeFirst) > 0 { // like ForceNew but doesn't recreate the table
if err := deleteReplicas(ctx, conn, d.Id(), removeFirst, d.Timeout(schema.TimeoutUpdate)); err != nil {
return fmt.Errorf("updating replicas, while deleting: %w", err)
}
}

if len(added) > 0 {
if err := createReplicas(ctx, conn, d.Id(), added, tfVersion, true, d.Timeout(schema.TimeoutUpdate)); err != nil {
return fmt.Errorf("updating replicas, while creating: %w", err)
@@ -1311,18 +1332,6 @@ func updateReplica(ctx context.Context, d *schema.ResourceData, conn *dynamodb.D
return nil
}

func replicaRegions(r *schema.Set) []string {
var regions []string
for _, tfMapRaw := range r.List() {
tfMap, ok := tfMapRaw.(map[string]interface{})
if !ok {
continue
}
regions = append(regions, tfMap["region_name"].(string))
}
return regions
}

func UpdateDiffGSI(oldGsi, newGsi []interface{}, billingMode string) (ops []*dynamodb.GlobalSecondaryIndexUpdate, e error) {
// Transform slices into maps
oldGsis := make(map[string]interface{})
@@ -1528,9 +1537,8 @@ func deleteReplicas(ctx context.Context, conn *dynamodb.DynamoDB, tableName stri
}

func replicaPITR(ctx context.Context, conn *dynamodb.DynamoDB, tableName string, region string, tfVersion string) (bool, error) {
// At a future time, replicas should probably have a separate resource because,
// to manage them, you need connections from the different regions. However, they
// have to be created from the starting/main region...
// To manage replicas, you need connections from the different regions. However, they
// have to be created from the starting/main region.
session, err := conns.NewSessionForRegion(&conn.Config, region, tfVersion)
if err != nil {
return false, fmt.Errorf("new session for replica (%s) PITR: %w", region, err)
@@ -1564,9 +1572,7 @@ func replicaPITR(ctx context.Context, conn *dynamodb.DynamoDB, tableName string,

func addReplicaPITRs(ctx context.Context, conn *dynamodb.DynamoDB, tableName string, tfVersion string, replicas []interface{}) ([]interface{}, error) {
// This non-standard approach is needed because PITR info for a replica
// must come from a region-specific connection. A future table_replica
// resource may improve this.

// must come from a region-specific connection.
for i, replicaRaw := range replicas {
replica := replicaRaw.(map[string]interface{})

@@ -1613,6 +1619,61 @@ func addReplicaTagPropagates(configReplicas *schema.Set, replicas []interface{})
return replicas
}

// clearSSEDefaultKey sets the kms_key_arn to "" if it is the default key alias/aws/dynamodb.
// Not clearing the key causes diff problems and sends the key to AWS when it should not be.
func clearSSEDefaultKey(ctx context.Context, sseList []interface{}, meta interface{}) []interface{} {
if len(sseList) == 0 {
return sseList
}

sse := sseList[0].(map[string]interface{})

dk, err := kms.FindDefaultKey(ctx, "dynamodb", meta.(*conns.AWSClient).Region, meta)
if err != nil {
return sseList
}

if sse["kms_key_arn"].(string) == dk {
sse["kms_key_arn"] = ""
return []interface{}{sse}
}

return sseList
}

// clearReplicaDefaultKeys sets a replica's kms_key_arn to "" if it is the default key alias/aws/dynamodb for
// the replica's region. Not clearing the key causes diff problems and sends the key to AWS when it should not be.
func clearReplicaDefaultKeys(ctx context.Context, replicas []interface{}, meta interface{}) []interface{} {
if len(replicas) == 0 {
return replicas
}

for i, replicaRaw := range replicas {
replica := replicaRaw.(map[string]interface{})

if v, ok := replica["kms_key_arn"].(string); !ok || v == "" {
continue
}

if v, ok := replica["region_name"].(string); !ok || v == "" {
continue
}

dk, err := kms.FindDefaultKey(ctx, "dynamodb", replica["region_name"].(string), meta)
if err != nil {
continue
}

if replica["kms_key_arn"].(string) == dk {
replica["kms_key_arn"] = ""
}

replicas[i] = replica
}

return replicas
}

// flatteners, expanders

func flattenTableAttributeDefinitions(definitions []*dynamodb.AttributeDefinition) []interface{} {
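The `clearSSEDefaultKey` and `clearReplicaDefaultKeys` helpers above only normalize state: when DynamoDB reports the region's AWS-managed `alias/aws/dynamodb` key, the ARN is cleared from state so it is never sent back on update. A hedged sketch of the kind of configuration that previously produced a perpetual diff, using illustrative names:

```hcl
resource "aws_dynamodb_table" "example" {
  name         = "example"
  hash_key     = "TestTableHashKey"
  billing_mode = "PAY_PER_REQUEST"

  attribute {
    name = "TestTableHashKey"
    type = "S"
  }

  # kms_key_arn is deliberately omitted, so DynamoDB falls back to the
  # AWS-managed alias/aws/dynamodb key. Before this change, a read could
  # populate state with that default key's ARN, which was then sent back
  # to AWS and caused a perpetual diff.
  server_side_encryption {
    enabled = true
  }
}
```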
42 changes: 28 additions & 14 deletions internal/service/dynamodb/table_replica.go
@@ -19,6 +19,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-aws/internal/conns"
"github.com/hashicorp/terraform-provider-aws/internal/create"
"github.com/hashicorp/terraform-provider-aws/internal/service/kms"
tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -68,6 +69,7 @@ func ResourceTableReplica() *schema.Resource {
Optional: true,
Computed: true,
ValidateFunc: verify.ValidARN,
ForceNew: true,
},
"point_in_time_recovery": { // direct to replica
Type: schema.TypeBool,
@@ -128,11 +130,9 @@ func resourceTableReplicaCreate(ctx context.Context, d *schema.ResourceData, met

input := &dynamodb.UpdateTableInput{
TableName: aws.String(tableName),
ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
{
Create: replicaInput,
},
},
ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{{
Create: replicaInput,
}},
}

err = resource.RetryContext(ctx, maxDuration(replicaUpdateTimeout, d.Timeout(schema.TimeoutCreate)), func() *resource.RetryError {
Expand Down Expand Up @@ -250,7 +250,16 @@ func resourceTableReplicaRead(ctx context.Context, d *schema.ResourceData, meta
return create.DiagError(names.DynamoDB, create.ErrActionReading, ResNameTableReplica, d.Id(), err)
}

d.Set("kms_key_arn", replica.KMSMasterKeyId)
dk, err := kms.FindDefaultKey(ctx, "dynamodb", replicaRegion, meta)
if err != nil {
return create.DiagError(names.DynamoDB, create.ErrActionReading, ResNameTableReplica, d.Id(), err)
}

if replica.KMSMasterKeyId == nil || aws.StringValue(replica.KMSMasterKeyId) == dk {
d.Set("kms_key_arn", nil)
Review comment on lines +258 to +259:

Hi! I'm using a default KMS key on DynamoDB, and recently I updated my AWS provider to include this change. However, I'm now seeing that Terraform keeps planning to delete/recreate my replica because it thinks the kms_key_arn is not set (and wants to set it to the default KMS key ARN). Could this change be related to that?

} else {
d.Set("kms_key_arn", replica.KMSMasterKeyId)
}

if replica.ReplicaTableClassSummary != nil {
d.Set("table_class_override", replica.ReplicaTableClassSummary.TableClass)
@@ -372,18 +381,23 @@ func resourceTableReplicaUpdate(ctx context.Context, d *schema.ResourceData, met
RegionName: aws.String(replicaRegion),
}

if d.HasChange("kms_key_arn") {
viaMainChanges = true
viaMainInput.KMSMasterKeyId = aws.String(d.Get("kms_key_arn").(string))
if d.HasChange("kms_key_arn") && !d.IsNewResource() { // create ends with update and sets kms_key_arn causing change that is not
dk, err := kms.FindDefaultKey(ctx, "dynamodb", replicaRegion, meta)
if err != nil {
return create.DiagError(names.DynamoDB, create.ErrActionUpdating, ResNameTableReplica, d.Id(), fmt.Errorf("region %s: %w", replicaRegion, err))
}

if d.Get("kms_key_arn").(string) != dk {
viaMainChanges = true
viaMainInput.KMSMasterKeyId = aws.String(d.Get("kms_key_arn").(string))
}
}

if viaMainChanges {
input := &dynamodb.UpdateTableInput{
ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
{
Update: viaMainInput,
},
},
ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{{
Update: viaMainInput,
}},
TableName: aws.String(tableName),
}

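For the standalone replica resource, `kms_key_arn` is now `ForceNew`, and the read clears the region's default key before setting state. A minimal sketch under the new behavior, where the `aws.alternate` provider alias and the `aws_kms_key.replica` CMK are hypothetical:

```hcl
resource "aws_dynamodb_table_replica" "example" {
  # Provider aliased to the replica's region (hypothetical alias).
  provider         = aws.alternate
  global_table_arn = aws_dynamodb_table.example.arn

  # Changing this now forces replacement of the replica so it can be
  # re-encrypted with the new key (hypothetical CMK reference).
  kms_key_arn = aws_kms_key.replica.arn
}
```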