Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Bug:] azurerm_synapse_sql_pool - correct the behavior of geo_backup_policy_enabled #23217

Merged
merged 11 commits into the base branch from the author's branch on
Sep 22, 2023
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,7 @@ resource "azurerm_synapse_sql_pool" "test" {
synapse_workspace_id = azurerm_synapse_workspace.test.id
sku_name = "DW100c"
create_mode = "Default"
storage_account_type = "GRS"
}

resource "azurerm_stream_analytics_output_synapse" "test" {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ provider "azurerm" {
}

resource "azurerm_resource_group" "test" {
name = "acctestsw%[1]d"
name = "acctestRG-synapse-%[1]d"
location = "%[2]s"
}

Expand Down Expand Up @@ -224,6 +224,7 @@ resource "azurerm_synapse_sql_pool" "test" {
synapse_workspace_id = azurerm_synapse_workspace.test.id
sku_name = "DW100c"
create_mode = "Default"
storage_account_type = "GRS"
}

resource "azurerm_storage_account" "test" {
Expand Down
84 changes: 80 additions & 4 deletions internal/services/synapse/synapse_sql_pool_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
"github.com/Azure/go-autorest/autorest/date"
"github.com/hashicorp/terraform-provider-azurerm/helpers/tf"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/features"
mssqlParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/mssql/parse"
mssqlValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/mssql/validate"
"github.com/hashicorp/terraform-provider-azurerm/internal/services/synapse/parse"
Expand All @@ -31,7 +32,7 @@ const (
)

func resourceSynapseSqlPool() *pluginsdk.Resource {
return &pluginsdk.Resource{
resource := &pluginsdk.Resource{
Create: resourceSynapseSqlPoolCreate,
Read: resourceSynapseSqlPoolRead,
Update: resourceSynapseSqlPoolUpdate,
Expand Down Expand Up @@ -168,7 +169,49 @@ func resourceSynapseSqlPool() *pluginsdk.Resource {

"tags": tags.Schema(),
},

CustomizeDiff: pluginsdk.CustomizeDiffShim(synapseSqlPoolCustomizeDiff),
}

if !features.FourPointOhBeta() {
// NOTE: In v3.0 providers this will be an Optional field with a 'Default'
// of 'GRS' to match the previous providers behavior...
resource.Schema["storage_account_type"] = &pluginsdk.Schema{
Type: pluginsdk.TypeString,
Default: string(synapse.StorageAccountTypeGRS),
Optional: true,
ValidateFunc: validation.StringInSlice([]string{
string(synapse.StorageAccountTypeLRS),
string(synapse.StorageAccountTypeGRS),
}, false),
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this will be safe to make ForceNew too, since this can only ever have been GRS in the past?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since we have an Optional field here with a Default of the only value it could have been in the past, we'd be fine to simply set ForceNew on the schema and not worry about the state (since the value read from the API will match the Default value, the state will just be updated as there's no diff)

} else {
resource.Schema["storage_account_type"] = &pluginsdk.Schema{
Type: pluginsdk.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{
string(synapse.StorageAccountTypeLRS),
string(synapse.StorageAccountTypeGRS),
}, false),
}
}

return resource
}

// synapseSqlPoolCustomizeDiff rejects plans that enable geo-backup while the
// SQL Pool's backing storage is locally redundant. Geo-redundant backups
// require a geo-redundant (GRS) storage account, so a plan combining
// `storage_account_type = "LRS"` with `geo_backup_policy_enabled = true` is
// invalid and must fail at plan time rather than at apply time.
func synapseSqlPoolCustomizeDiff(ctx context.Context, d *pluginsdk.ResourceDiff, v interface{}) error {
	// GetChange returns (old, new); only the planned (new) values matter here.
	_, rawGeoBackup := d.GetChange("geo_backup_policy_enabled")
	_, rawAccountType := d.GetChange("storage_account_type")

	wantsGeoBackup := rawGeoBackup.(bool)
	usesLRS := synapse.StorageAccountType(rawAccountType.(string)) == synapse.StorageAccountTypeLRS

	if wantsGeoBackup && usesLRS {
		return fmt.Errorf("`geo_backup_policy_enabled` cannot be `true` if the `storage_account_type` is `LRS`")
	}

	return nil
}

func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) error {
Expand All @@ -191,6 +234,7 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e
return fmt.Errorf("checking for presence of existing %s: %+v", id, err)
}
}

if !utils.ResponseWasNotFound(existing.Response) {
return tf.ImportAsExistsError("azurerm_synapse_sql_pool", id.ID())
}
Expand All @@ -200,11 +244,15 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e
return fmt.Errorf("retrieving %s: %+v", workspaceId, err)
}

geoBackupEnabled := d.Get("geo_backup_policy_enabled").(bool)
storageAccountType := synapse.StorageAccountType(d.Get("storage_account_type").(string))

mode := d.Get("create_mode").(string)
sqlPoolInfo := synapse.SQLPool{
Location: workspace.Location,
SQLPoolResourceProperties: &synapse.SQLPoolResourceProperties{
CreateMode: synapse.CreateMode(*utils.String(mode)),
CreateMode: synapse.CreateMode(*utils.String(mode)),
StorageAccountType: storageAccountType,
},
Sku: &synapse.Sku{
Name: utils.String(d.Get("sku_name").(string)),
Expand All @@ -217,21 +265,26 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e
sqlPoolInfo.SQLPoolResourceProperties.Collation = utils.String(d.Get("collation").(string))
case RecoveryCreateMode:
recoveryDatabaseId := constructSourceDatabaseId(d.Get("recovery_database_id").(string))

if recoveryDatabaseId == "" {
return fmt.Errorf("`recovery_database_id` must be set when `create_mode` is %q", RecoveryCreateMode)
}

sqlPoolInfo.SQLPoolResourceProperties.RecoverableDatabaseID = utils.String(recoveryDatabaseId)
case PointInTimeRestoreCreateMode:
restore := d.Get("restore").([]interface{})
if len(restore) == 0 || restore[0] == nil {
return fmt.Errorf("`restore` block must be set when `create_mode` is %q", PointInTimeRestoreCreateMode)
}

v := restore[0].(map[string]interface{})
sourceDatabaseId := constructSourceDatabaseId(v["source_database_id"].(string))
vTime, parseErr := date.ParseTime(time.RFC3339, v["point_in_time"].(string))

if parseErr != nil {
return fmt.Errorf("parsing time format: %+v", parseErr)
}

sqlPoolInfo.SQLPoolResourceProperties.RestorePointInTime = &date.Time{Time: vTime}
sqlPoolInfo.SQLPoolResourceProperties.SourceDatabaseID = utils.String(sourceDatabaseId)
}
Expand All @@ -240,6 +293,7 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e
if err != nil {
return fmt.Errorf("creating %s: %+v", id, err)
}

if err = future.WaitForCompletionRef(ctx, sqlClient.Client); err != nil {
return fmt.Errorf("waiting for creation of %s: %+v", id, err)
}
Expand All @@ -250,12 +304,15 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e
Status: synapse.TransparentDataEncryptionStatusEnabled,
},
}

if _, err := sqlPTDEClient.CreateOrUpdate(ctx, id.ResourceGroup, id.WorkspaceName, id.Name, parameter); err != nil {
return fmt.Errorf("setting `data_encrypted`: %+v", err)
}
}

if !d.Get("geo_backup_policy_enabled").(bool) {
// Only update the Geo Backup Policy if it has been disabled since it is
// already enabled by default...
if !geoBackupEnabled {
geoBackupParams := synapse.GeoBackupPolicy{
GeoBackupPolicyProperties: &synapse.GeoBackupPolicyProperties{
State: synapse.GeoBackupPolicyStateDisabled,
Expand All @@ -273,8 +330,8 @@ func resourceSynapseSqlPoolCreate(d *pluginsdk.ResourceData, meta interface{}) e

func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) error {
sqlClient := meta.(*clients.Client).Synapse.SqlPoolClient
sqlPTDEClient := meta.(*clients.Client).Synapse.SqlPoolTransparentDataEncryptionClient
geoBackUpClient := meta.(*clients.Client).Synapse.SqlPoolGeoBackupPoliciesClient
sqlPTDEClient := meta.(*clients.Client).Synapse.SqlPoolTransparentDataEncryptionClient
ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d)
defer cancel()

Expand All @@ -283,6 +340,15 @@ func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) e
return err
}

// We only need to do this check for v3.0 providers since in v4.0
// this field will be Required ForceNew...
if !features.FourPointOhBeta() {
if d.HasChange("storage_account_type") {
oldStorageType, newStorageType := d.GetChange("storage_account_type")
return fmt.Errorf("the `storage_account_type` cannot be changed once it has been set, was `%s` got `%s`", oldStorageType.(string), newStorageType.(string))
}
}

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we make the property ForceNew as described above, we can remove this.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed.

if d.HasChange("data_encrypted") {
status := synapse.TransparentDataEncryptionStatusDisabled
if d.Get("data_encrypted").(bool) {
Expand All @@ -294,6 +360,7 @@ func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) e
Status: status,
},
}

if _, err := sqlPTDEClient.CreateOrUpdate(ctx, id.ResourceGroup, id.WorkspaceName, id.Name, parameter); err != nil {
return fmt.Errorf("updating `data_encrypted`: %+v", err)
}
Expand All @@ -310,6 +377,7 @@ func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) e
State: state,
},
}

if _, err := geoBackUpClient.CreateOrUpdate(ctx, id.ResourceGroup, id.WorkspaceName, id.Name, geoBackupParams); err != nil {
return fmt.Errorf("updating `geo_backup_policy_enabled`: %+v", err)
}
Expand All @@ -333,6 +401,7 @@ func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) e
if !ok {
return fmt.Errorf("internal-error: context had no deadline")
}

stateConf := &pluginsdk.StateChangeConf{
Pending: []string{
"Scaling",
Expand All @@ -351,6 +420,7 @@ func resourceSynapseSqlPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) e
}
}
}

return resourceSynapseSqlPoolRead(d, meta)
}

Expand Down Expand Up @@ -393,8 +463,10 @@ func resourceSynapseSqlPoolRead(d *pluginsdk.ResourceData, meta interface{}) err
if resp.Sku != nil {
d.Set("sku_name", resp.Sku.Name)
}

if props := resp.SQLPoolResourceProperties; props != nil {
d.Set("collation", props.Collation)
d.Set("storage_account_type", props.StorageAccountType)
}

geoBackupEnabled := true
Expand Down Expand Up @@ -431,6 +503,7 @@ func resourceSynapseSqlPoolDelete(d *pluginsdk.ResourceData, meta interface{}) e
if err = future.WaitForCompletionRef(ctx, sqlClient.Client); err != nil {
return fmt.Errorf("waiting for deletion of %s: %+v", *id, err)
}

return nil
}

Expand All @@ -440,9 +513,11 @@ func synapseSqlPoolScaleStateRefreshFunc(ctx context.Context, client *synapse.SQ
if err != nil {
return resp, "failed", err
}

if resp.SQLPoolResourceProperties == nil || resp.SQLPoolResourceProperties.Status == nil {
return resp, "failed", nil
}

return resp, *resp.SQLPoolResourceProperties.Status, nil
}
}
Expand All @@ -455,5 +530,6 @@ func constructSourceDatabaseId(id string) string {
if err != nil {
return id
}

return mssqlParse.NewDatabaseID(sqlPoolId.SubscriptionId, sqlPoolId.ResourceGroup, sqlPoolId.WorkspaceName, sqlPoolId.Name).ID()
}
Loading