Pull MakeEmptyBlockSuppressFunc to common package
nfx committed May 31, 2021
1 parent cbd0769 commit 91b34f3
Showing 8 changed files with 58 additions and 42 deletions.
11 changes: 11 additions & 0 deletions common/resource.go
@@ -93,3 +93,14 @@ func (r Resource) ToResource() *schema.Resource {
Timeouts: r.Timeouts,
}
}

func MakeEmptyBlockSuppressFunc(name string) func(k, old, new string, d *schema.ResourceData) bool {
return func(k, old, new string, d *schema.ResourceData) bool {
log.Printf("[DEBUG] k='%v', old='%v', new='%v'", k, old, new)
if k == name && old == "1" && new == "0" {
log.Printf("[DEBUG] Disable removal of empty block")
return true
}
return false
}
}
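
In a `DiffSuppressFunc`, `old` comes from state and `new` from configuration, so `old == "1" && new == "0"` covers the case where the remote API populated one (possibly empty) block while the HCL declares none. A hypothetical sketch of that situation, with illustrative values not taken from this commit:

# No aws_attributes block is declared here, so the configuration count is 0.
# If the Clusters API echoes back default AWS attributes, state records one
# block, and without the suppress function every plan would show
# "aws_attributes.#: 1 => 0".
resource "databricks_cluster" "example" {
  cluster_name            = "shared"
  spark_version           = "8.2.x-scala2.12"
  node_type_id            = "i3.xlarge"
  num_workers             = 1
  autotermination_minutes = 20
}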
6 changes: 3 additions & 3 deletions compute/resource_cluster.go
@@ -73,9 +73,9 @@ func resourceClusterSchema() map[string]*schema.Schema {
s["aws_attributes"].ConflictsWith = []string{"azure_attributes", "gcp_attributes"}
s["azure_attributes"].ConflictsWith = []string{"aws_attributes", "gcp_attributes"}
s["gcp_attributes"].ConflictsWith = []string{"aws_attributes", "azure_attributes"}
s["aws_attributes"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("aws_attributes.#")
s["azure_attributes"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("azure_attributes.#")
s["gcp_attributes"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("gcp_attributes.#")
s["aws_attributes"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("aws_attributes.#")
s["azure_attributes"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("azure_attributes.#")
s["gcp_attributes"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("gcp_attributes.#")

s["is_pinned"] = &schema.Schema{
Type: schema.TypeBool,
16 changes: 2 additions & 14 deletions compute/resource_instance_pool.go
@@ -2,7 +2,6 @@ package compute

import (
"context"
"log"

"github.com/databrickslabs/terraform-provider-databricks/common"

@@ -54,17 +53,6 @@ func (a InstancePoolsAPI) Delete(instancePoolID string) error {
}, nil)
}

func makeEmptyBlockSuppressFunc(name string) func(k, old, new string, d *schema.ResourceData) bool {
return func(k, old, new string, d *schema.ResourceData) bool {
log.Printf("[DEBUG] k='%v', old='%v', new='%v'", k, old, new)
if k == name && old == "1" && new == "0" {
log.Printf("[DEBUG] Disable removal of empty block")
return true
}
return false
}
}

// ResourceInstancePool ...
func ResourceInstancePool() *schema.Resource {
s := common.StructToSchema(InstancePool{}, func(s map[string]*schema.Schema) map[string]*schema.Schema {
@@ -79,8 +67,8 @@ func ResourceInstancePool() *schema.Resource {
s["enable_elastic_disk"].Default = true
s["aws_attributes"].ConflictsWith = []string{"azure_attributes"}
s["azure_attributes"].ConflictsWith = []string{"aws_attributes"}
s["aws_attributes"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("aws_attributes.#")
s["azure_attributes"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("azure_attributes.#")
s["aws_attributes"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("aws_attributes.#")
s["azure_attributes"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("azure_attributes.#")
if v, err := common.SchemaPath(s, "aws_attributes", "availability"); err == nil {
v.ForceNew = true
v.Default = AwsAvailabilitySpot
8 changes: 4 additions & 4 deletions compute/resource_job.go
@@ -132,16 +132,16 @@ var jobSchema = common.StructToSchema(JobSettings{},
}

if v, err := common.SchemaPath(s, "new_cluster", "aws_attributes"); err == nil {
v.DiffSuppressFunc = makeEmptyBlockSuppressFunc("new_cluster.0.aws_attributes.#")
v.DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("new_cluster.0.aws_attributes.#")
}
if v, err := common.SchemaPath(s, "new_cluster", "azure_attributes"); err == nil {
v.DiffSuppressFunc = makeEmptyBlockSuppressFunc("new_cluster.0.azure_attributes.#")
v.DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("new_cluster.0.azure_attributes.#")
}
if v, err := common.SchemaPath(s, "new_cluster", "gcp_attributes"); err == nil {
v.DiffSuppressFunc = makeEmptyBlockSuppressFunc("new_cluster.0.gcp_attributes.#")
v.DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("new_cluster.0.gcp_attributes.#")
}

s["email_notifications"].DiffSuppressFunc = makeEmptyBlockSuppressFunc("email_notifications.#")
s["email_notifications"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("email_notifications.#")

s["name"].Description = "An optional name for the job. The default value is Untitled."
s["library"].Description = "An optional list of libraries to be installed on " +
16 changes: 8 additions & 8 deletions compute/resource_pipeline_test.go
@@ -96,10 +96,10 @@ func TestResourcePipelineCreate(t *testing.T) {
"cluster_tag1" = "cluster_value1"
}
}
libraries {
library {
jar = "dbfs:/pipelines/code/abcde.jar"
}
libraries {
library {
maven {
coordinates = "com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.18"
}
@@ -131,7 +131,7 @@ func TestResourcePipelineCreate_Error(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
jar = "jar"
}
filters {
@@ -180,7 +180,7 @@ func TestResourcePipelineCreate_ErrorWhenWaitingFailedCleanup(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
jar = "jar"
}
filters {
@@ -229,7 +229,7 @@ func TestResourcePipelineCreate_ErrorWhenWaitingSuccessfulCleanup(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
jar = "jar"
}
filters {
@@ -356,7 +356,7 @@ func TestResourcePipelineUpdate(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
maven {
coordinates = "coordinates"
}
@@ -387,7 +387,7 @@ func TestResourcePipelineUpdate_Error(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
maven {
coordinates = "coordinates"
}
@@ -439,7 +439,7 @@ func TestResourcePipelineUpdate_FailsAfterUpdate(t *testing.T) {
Resource: ResourcePipeline(),
HCL: `name = "test"
storage = "/test/storage"
libraries {
library {
maven {
coordinates = "coordinates"
}
6 changes: 3 additions & 3 deletions docs/resources/sql_endpoint.md
@@ -32,12 +32,12 @@ The following arguments are supported:

* `name` - (Required) Name of the SQL endpoint. Must be unique.
* `cluster_size` - (Required) The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
* `min_num_clusters` - Minimum number of clusters available when a SQL endpoint is running. The default is 1.
* `max_num_clusters` - Maximum number of clusters available when a SQL endpoint is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
* `min_num_clusters` - Minimum number of clusters available when a SQL endpoint is running. The default is `1`.
* `max_num_clusters` - Maximum number of clusters available when a SQL endpoint is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to `1`.
* `auto_stop_mins` - Time in minutes until an idle SQL endpoint terminates all clusters and stops. This field is optional. The default is 0, which means auto stop is disabled.
* `instance_profile_arn` - [databricks_instance_profile](instance_profile.md) used to access storage from the SQL endpoint. This field is optional.
* `tags` - Databricks tags all endpoint resources with these tags.
* `spot_instance_policy` - The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional.
* `spot_instance_policy` - The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. The default is `COST_OPTIMIZED`.
* `enable_photon` - Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional.
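
A minimal configuration exercising the arguments above might look like the following (the endpoint name and values are illustrative, not taken from this documentation page):

resource "databricks_sql_endpoint" "this" {
  name                 = "Shared SQL endpoint"
  cluster_size         = "Small"
  max_num_clusters     = 1
  auto_stop_mins       = 120
  spot_instance_policy = "COST_OPTIMIZED"
  enable_photon        = true
}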

## Attribute Reference
5 changes: 5 additions & 0 deletions sqlanalytics/resource_sql_endpoint.go
@@ -180,11 +180,16 @@ func (a SQLEndpointsAPI) Delete(endpointID string) error {
func ResourceSQLEndpoint() *schema.Resource {
s := common.StructToSchema(SQLEndpoint{}, func(
m map[string]*schema.Schema) map[string]*schema.Schema {
m["auto_stop_mins"].Default = 120
m["cluster_size"].ValidateDiagFunc = validation.ToDiagFunc(
validation.StringInSlice(ClusterSizes, false))
m["max_num_clusters"].Default = 1
m["max_num_clusters"].ValidateDiagFunc = validation.ToDiagFunc(
validation.IntBetween(1, MaxNumClusters))
m["min_num_clusters"].Default = 1
m["num_clusters"].Default = 1
m["spot_instance_policy"].Default = "COST_OPTIMIZED"
m["tags"].DiffSuppressFunc = common.MakeEmptyBlockSuppressFunc("tags.#")
return m
})
return common.Resource{
32 changes: 22 additions & 10 deletions sqlanalytics/resource_sql_endpoint_test.go
@@ -72,9 +72,13 @@ func TestResourceSQLEndpointCreate(t *testing.T) {
Method: "POST",
Resource: "/api/2.0/sql/endpoints",
ExpectedRequest: SQLEndpoint{
Name: "foo",
ClusterSize: "Small",
MaxNumClusters: 1,
Name: "foo",
ClusterSize: "Small",
MaxNumClusters: 1,
AutoStopMinutes: 120,
MinNumClusters: 1,
NumClusters: 1,
SpotInstancePolicy: "COST_OPTIMIZED",
},
Response: SQLEndpoint{
ID: "abc",
@@ -113,9 +117,13 @@ func TestResourceSQLEndpointCreate_ErrorDisabled(t *testing.T) {
Method: "POST",
Resource: "/api/2.0/sql/endpoints",
ExpectedRequest: SQLEndpoint{
Name: "foo",
ClusterSize: "Small",
MaxNumClusters: 1,
Name: "foo",
ClusterSize: "Small",
AutoStopMinutes: 120,
MaxNumClusters: 1,
MinNumClusters: 1,
NumClusters: 1,
SpotInstancePolicy: "COST_OPTIMIZED",
},
Status: 404,
Response: common.APIError{
@@ -169,10 +177,14 @@ func TestResourceSQLEndpointUpdate(t *testing.T) {
Method: "POST",
Resource: "/api/2.0/sql/endpoints/abc/edit",
ExpectedRequest: SQLEndpoint{
ID: "abc",
Name: "foo",
ClusterSize: "Small",
MaxNumClusters: 1,
ID: "abc",
Name: "foo",
ClusterSize: "Small",
AutoStopMinutes: 120,
MaxNumClusters: 1,
MinNumClusters: 1,
NumClusters: 1,
SpotInstancePolicy: "COST_OPTIMIZED",
},
},
{
