diff --git a/CHANGELOG.md b/CHANGELOG.md index fb4d5f3381..afa33061d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,22 @@
 ## 0.3.2

-* Fix incorrect escaping of notebook names ([#566](https://github.com/databrickslabs/terraform-provider-databricks/pull/566))
+* Fixed minor issues to add support for GCP ([#558](https://github.com/databrickslabs/terraform-provider-databricks/pull/558))
+* Fixed `databricks_permissions` for SQL Analytics Entities ([#535](https://github.com/databrickslabs/terraform-provider-databricks/issues/535))
+* Fixed incorrect HTTP 404 handling on create ([#564](https://github.com/databrickslabs/terraform-provider-databricks/issues/564), [#576](https://github.com/databrickslabs/terraform-provider-databricks/issues/576))
+* Fixed incorrect escaping of notebook names ([#566](https://github.com/databrickslabs/terraform-provider-databricks/pull/566))
+* Fixed entitlements for `databricks_group` ([#549](https://github.com/databrickslabs/terraform-provider-databricks/pull/549))
+* Fixed rate limiting to perform more than 1 request per second ([#577](https://github.com/databrickslabs/terraform-provider-databricks/pull/577))
+* Added support for spot instances on Azure ([#571](https://github.com/databrickslabs/terraform-provider-databricks/pull/571))
+* Added `pause_status` as an optional field for job schedules ([#575](https://github.com/databrickslabs/terraform-provider-databricks/pull/575))
+* Fixed minor documentation issues.
+
+Updated dependency versions:
+
+* Bump github.com/aws/aws-sdk-go from 1.37.20 to 1.38.10
+* Bump github.com/hashicorp/hcl/v2 from 2.9.0 to 2.9.1
+* Bump github.com/zclconf/go-cty from 1.8.0 to 1.8.1
+* Bump github.com/google/go-querystring from 1.0.0 to 1.1.0

 ## 0.3.1

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 91fd8c3b8f..4440b19a98 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -98,6 +98,187 @@ $ docker run -it -v $(pwd):/workpace -w /workpace databricks-terraform plan
 $ docker run -it -v $(pwd):/workpace -w /workpace databricks-terraform apply
 ```

+## Adding a new resource
+
+The general process for adding a new resource is:
+
+*Define the resource models.* The models for a resource are `struct`s defining the schemas of the objects in the Databricks REST API. Define structures used for multiple resources in a common `models.go` file; otherwise, you can define these directly in your resource file. An example model:
+```go
+type Field struct {
+	A                    string `json:"a,omitempty"`
+	AMoreComplicatedName int    `json:"a_more_complicated_name,omitempty"`
+}
+
+type Example struct {
+	ID           string   `json:"id"`
+	TheField     *Field   `json:"the_field"`
+	AnotherField bool     `json:"another_field"`
+	Filters      []string `json:"filters" tf:"optional"`
+}
+```
+
+Some interesting points to note here:
+* Use the `json` tag to determine the serde properties of the field. The allowed tags are defined here: https://go.googlesource.com/go/+/go1.16/src/encoding/json/encode.go#158
+* Use the custom `tf` tag to indicate properties to be annotated on the Terraform schema for this struct. Supported values are:
+  * `optional` for optional fields
+  * `computed` for computed fields
+  * `alias:X` to use a custom name in HCL for a field
+  * `default:X` to set a default value for a field
+  * `max_items:N` to set the maximum number of items for a multi-valued parameter
+  * `slice_set` to indicate that the parameter should accept a set instead of a list
+* Do not use bare references to structs in the model; rather, use pointers to structs.
Maps and slices are permitted, as well as the following primitive types: int, int32, int64, float64, bool, string.
+See `typeToSchema` in `common/reflect_resource.go` for the up-to-date list of all supported field types and values for the `tf` tag.
+
+*Define the Terraform schema.* This is made easy for you by the `StructToSchema` method in the `common` package, which converts your struct to a Terraform schema automatically and also accepts a function that lets you post-process the generated schema, if needed.
+```go
+var exampleSchema = common.StructToSchema(Example{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { return m })
+```
+
+*Define the API client for the resource.* You will need to implement create, read, update, and delete functions.
+```go
+type ExampleApi struct {
+	client *common.DatabricksClient
+	ctx    context.Context
+}
+
+func NewExampleApi(ctx context.Context, m interface{}) ExampleApi {
+	return ExampleApi{m.(*common.DatabricksClient), ctx}
+}
+
+func (a ExampleApi) Create(e Example) (string, error) {
+	var id string
+	err := a.client.Post(a.ctx, "/example", e, &id)
+	return id, err
+}
+
+func (a ExampleApi) Read(id string) (e Example, err error) {
+	err = a.client.Get(a.ctx, "/example/"+id, nil, &e)
+	return
+}
+
+func (a ExampleApi) Update(id string, e Example) error {
+	return a.client.Put(a.ctx, "/example/"+id, e)
+}
+
+func (a ExampleApi) Delete(id string) error {
+	return a.client.Delete(a.ctx, "/example/"+id, nil)
+}
+```
+
+*Define the Resource object itself.* This is made quite simple by using the `ToResource` method defined on the `Resource` type in the `common` package. A simple example:
+```go
+func ResourceExample() *schema.Resource {
+	return common.Resource{
+		Schema:        exampleSchema,
+		SchemaVersion: 2,
+		Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
+			var e Example
+			err := common.DataToStructPointer(d, exampleSchema, &e)
+			if err != nil {
+				return err
+			}
+			id, err := NewExampleApi(ctx, c).Create(e)
+			if err != nil {
+				return err
+			}
+			d.SetId(id)
+			return nil
+		},
+		Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
+			i, err := NewExampleApi(ctx, c).Read(d.Id())
+			if err != nil {
+				return err
+			}
+			return common.StructToData(i, exampleSchema, d)
+		},
+		Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
+			var e Example
+			err := common.DataToStructPointer(d, exampleSchema, &e)
+			if err != nil {
+				return err
+			}
+			return NewExampleApi(ctx, c).Update(d.Id(), e)
+		},
+		Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
+			return NewExampleApi(ctx, c).Delete(d.Id())
+		},
+	}.ToResource()
+}
+```
+
+*Add the resource to the top-level provider.* Simply add the resource to the provider definition in `provider/provider.go`.
+
+*Write unit tests for your resource.* To write your unit tests, you can make use of the `ResourceFixture` and `HTTPFixture` structs defined in the `qa` package. This starts a fake HTTP server, asserting that your resource provider generates the correct request for a given HCL template body for your resource.
An example: + +```go +func TestExampleResourceCreate(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/example", + ExpectedRequest: Example{ + TheField: Field{ + A: "test", + }, + }, + Response: map[string]interface{} { + "id": "abcd", + "the_field": map[string]interface{} { + "a": "test", + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/example/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "the_field": map[string]interface{} { + "a": "test", + }, + }, + }, + }, + Create: true, + Resource: ResourceExample(), + HCL: `the_field { + a = "test" + }`, + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abcd", d.Id()) +} +``` + +*Write acceptance tests.* These are E2E tests which run terraform against the live cloud and Databricks APIs. For these, you can use the `Test` and `Step` structs defined in the `acceptance` package. An example: + +```go +func TestPreviewAccPipelineResource_CreatePipeline(t *testing.T) { + acceptance.Test(t, []acceptance.Step{ + { + Template: ` + resource "databricks_example" "this" { + the_field { + a = "test" + a_more_complicated_name = 3 + } + another_field = true + filters = [ + "a", + "b" + ] + } + `, + }, + }) +} +``` + +## Debugging + +**TF_LOG=DEBUG terraform apply** allows you to see the internal logs from `terraform apply`. + ## Testing * [Integration tests](scripts/README.md) should be run at a client level against both azure and aws to maintain sdk parity against both apis. diff --git a/Makefile b/Makefile index 4ea6399af3..3de5a4fa6c 100644 --- a/Makefile +++ b/Makefile @@ -18,8 +18,6 @@ coverage: test @echo "✓ Opening coverage for unit tests ..." @go tool cover -html=coverage.txt -VERSION = 0.3.1 - build: vendor @echo "✓ Building source code with go build ..." @go build -mod vendor -v -o terraform-provider-databricks @@ -68,6 +66,10 @@ test-awsmt: install @echo "✓ Running Terraform Acceptance Tests for AWS MT..." @/bin/bash scripts/run.sh awsmt '^(TestAcc|TestAwsAcc)' --debug --tee +test-preview: install + @echo "✓ Running acceptance Tests for Preview features..." + @/bin/bash scripts/run.sh preview '^TestPreviewAcc' --debug --tee + snapshot: @echo "✓ Making Snapshot ..." @goreleaser release --rm-dist --snapshot diff --git a/README.md b/README.md index 6fa7ef4228..a4dad2bc08 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ terraform { required_providers { databricks = { source = "databrickslabs/databricks" - version = "0.3.1" + version = "0.3.2" } } } diff --git a/common/http.go b/common/http.go index 618140a4f1..b19cd70e15 100644 --- a/common/http.go +++ b/common/http.go @@ -390,7 +390,11 @@ func (c *DatabricksClient) redactedDump(body []byte) (res string) { // error in this case is not much relevant return } - return onlyNBytes(string(rePacked), 1024) + maxBytes := 1024 + if c.DebugTruncateBytes > maxBytes { + maxBytes = c.DebugTruncateBytes + } + return onlyNBytes(string(rePacked), maxBytes) } func (c *DatabricksClient) userAgent(ctx context.Context) string { diff --git a/common/reflect_resource_test.go b/common/reflect_resource_test.go index fbeb700407..2faf07f215 100644 --- a/common/reflect_resource_test.go +++ b/common/reflect_resource_test.go @@ -358,14 +358,16 @@ func TestStructToData(t *testing.T) { // Empty optional string should not be set. { - // nolint: marked as deprecated, without viable alternative. 
+		//lint:ignore SA1019
+		// nolint
 		_, ok := d.GetOkExists("addresses.0.optional_string")
 		assert.Falsef(t, ok, "Empty optional string should not be set in ResourceData")
 	}

 	// Empty required string should be set.
 	{
-		// nolint: marked as deprecated, without viable alternative.
+		//lint:ignore SA1019
+		// nolint
 		_, ok := d.GetOkExists("addresses.0.required_string")
 		assert.Truef(t, ok, "Empty required string should be set in ResourceData")
 	}
diff --git a/common/resource.go b/common/resource.go index d33987dda1..22f6102931 100644 --- a/common/resource.go +++ b/common/resource.go @@ -63,12 +63,6 @@ func (r Resource) ToResource() *schema.Resource {
 		CreateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 			c := m.(*DatabricksClient)
 			err := r.Create(ctx, d, c)
-			if e, ok := err.(APIError); ok && e.IsMissing() {
-				log.Printf("[INFO] %s[id=%s] is removed on backend",
-					ResourceName.GetOrUnknown(ctx), d.Id())
-				d.SetId("")
-				return nil
-			}
 			if err != nil {
 				return diag.FromErr(err)
 			}
diff --git a/common/version.go b/common/version.go index 8e8ee0cde2..d1f4c5a3ee 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common
 import "context"

 var (
-	version = "0.3.2"
+	version = "0.3.3"
 	// ResourceName is resource name without databricks_ prefix
 	ResourceName contextKey = 1
 	// Provider is the current instance of provider
diff --git a/compute/acceptance/cluster_test.go b/compute/acceptance/cluster_test.go index 52be72b2ab..468c33d99d 100644 --- a/compute/acceptance/cluster_test.go +++ b/compute/acceptance/cluster_test.go @@ -70,6 +70,9 @@ func TestAccClusterResource_CreateSingleNodeCluster(t *testing.T) {
 				"spark.databricks.cluster.profile" = "singleNode"
 				"spark.master" = "local[*]"
 			}
+			custom_tags = {
+				"ResourceClass" = "SingleNode"
+			}
 			{var.AWS_ATTRIBUTES}
 		}`,
 		},
diff --git a/compute/acceptance/pipeline_test.go b/compute/acceptance/pipeline_test.go new file mode 100644 index 0000000000..29e74be461 --- /dev/null +++ b/compute/acceptance/pipeline_test.go @@ -0,0 +1,53 @@
+package acceptance
+
+import (
+	"testing"
+
+	"github.com/databrickslabs/terraform-provider-databricks/internal/acceptance"
+)
+
+func TestPreviewAccPipelineResource_CreatePipeline(t *testing.T) {
+	acceptance.Test(t, []acceptance.Step{
+		{
+			Template: `
+			locals {
+				name = "pipeline-acceptance-{var.RANDOM}"
+			}
+			resource "databricks_pipeline" "this" {
+				name = local.name
+				storage = "/test/${local.name}"
+				configuration = {
+					key1 = "value1"
+					key2 = "value2"
+				}
+				clusters {
+					label = "default"
+					num_workers = 2
+					custom_tags = {
+						cluster_type = "default"
+					}
+				}
+
+				clusters {
+					label = "maintenance"
+					num_workers = 1
+					custom_tags = {
+						cluster_type = "maintenance"
+					}
+				}
+
+				library {
+					maven {
+						coordinates = "com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.7"
+					}
+				}
+				filters {
+					include = ["com.databricks.include"]
+					exclude = ["com.databricks.exclude"]
+				}
+				continuous = false
+			}
+			`,
+		},
+	})
+}
diff --git a/compute/clusters.go b/compute/clusters.go index c260f0b8c5..546a075a21 100644 --- a/compute/clusters.go +++ b/compute/clusters.go @@ -181,9 +181,15 @@ func (a ClustersAPI) waitForClusterStatus(clusterID string, desired ClusterState
 	}
 	if !clusterInfo.State.CanReach(desired) {
 		docLink := "https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterstate"
+		details := ""
+		if clusterInfo.TerminationReason != nil {
+			details = fmt.Sprintf(", Termination info: code: %s, type: %s, parameters: %v",
+				clusterInfo.TerminationReason.Code,
clusterInfo.TerminationReason.Type, + clusterInfo.TerminationReason.Parameters) + } return resource.NonRetryableError(fmt.Errorf( - "%s is not able to transition from %s to %s: %s. Please see %s for more details", - clusterID, clusterInfo.State, desired, clusterInfo.StateMessage, docLink)) + "%s is not able to transition from %s to %s: %s%s. Please see %s for more details", + clusterID, clusterInfo.State, desired, clusterInfo.StateMessage, details, docLink)) } return resource.RetryableError( fmt.Errorf("%s is %s, but has to be %s", diff --git a/compute/clusters_test.go b/compute/clusters_test.go index b8f7c06a26..ebde2d5b98 100644 --- a/compute/clusters_test.go +++ b/compute/clusters_test.go @@ -223,6 +223,8 @@ func TestWaitForClusterStatus_NotReachable(t *testing.T) { Response: ClusterInfo{ State: ClusterStateUnknown, StateMessage: "Something strange is going on", + TerminationReason: &TerminationReason{Code: "unknown", Type: "broken", + Parameters: map[string]string{"abc": "def"}}, }, }, }) @@ -234,7 +236,8 @@ func TestWaitForClusterStatus_NotReachable(t *testing.T) { ctx := context.Background() _, err = NewClustersAPI(ctx, client).waitForClusterStatus("abc", ClusterStateRunning) require.Error(t, err) - assert.Contains(t, err.Error(), "abc is not able to transition from UNKNOWN to RUNNING: Something strange is going on.") + assert.Contains(t, err.Error(), "abc is not able to transition from UNKNOWN to RUNNING: Something strange is going on") + assert.Contains(t, err.Error(), "code: unknown, type: broken") } func TestWaitForClusterStatus_NormalRetry(t *testing.T) { @@ -641,7 +644,6 @@ func TestAzureAccNodeTypes(t *testing.T) { clustersAPI := NewClustersAPI(ctx, common.CommonEnvironmentClient()) m := map[string]NodeTypeRequest{ "Standard_F4s": {}, - "Standard_NC12": {MinGPUs: 1}, "Standard_L32s_v2": {MinCores: 32, GBPerCore: 8}, } diff --git a/compute/model.go b/compute/model.go index 37d028009d..0b47f2b2db 100644 --- a/compute/model.go +++ b/compute/model.go @@ -11,8 +11,8 @@ type AutoScale struct { MaxWorkers int32 `json:"max_workers,omitempty"` } -// AwsAvailability is a type for describing AWS availability on cluster nodes -type AwsAvailability string +// Availability is a type for describing AWS availability on cluster nodes +type Availability string const ( // AwsAvailabilitySpot is spot instance type for clusters @@ -24,6 +24,17 @@ const ( AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK" ) +// https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#--azureavailability +const ( + // AzureAvailabilitySpot is spot instance type for clusters + AzureAvailabilitySpot = "SPOT_AZURE" + // AzureAvailabilityOnDemand is OnDemand instance type for clusters + AzureAvailabilityOnDemand = "ON_DEMAND_AZURE" + // AzureAvailabilitySpotWithFallback is Spot instance type for clusters with option + // to fallback into on-demand if instance cannot be acquired + AzureAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK_AZURE" +) + // AzureDiskVolumeType is disk type on azure vms type AzureDiskVolumeType string @@ -112,14 +123,29 @@ type ZonesInfo struct { // AwsAttributes encapsulates the aws attributes for aws based clusters // https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterattributes type AwsAttributes struct { - FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"` - Availability AwsAvailability `json:"availability,omitempty" tf:"computed"` - ZoneID string `json:"zone_id,omitempty" tf:"computed"` - InstanceProfileArn string 
`json:"instance_profile_arn,omitempty"` - SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty" tf:"computed"` - EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty" tf:"computed"` - EbsVolumeCount int32 `json:"ebs_volume_count,omitempty" tf:"computed"` - EbsVolumeSize int32 `json:"ebs_volume_size,omitempty" tf:"computed"` + FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"` + Availability Availability `json:"availability,omitempty" tf:"computed"` + ZoneID string `json:"zone_id,omitempty" tf:"computed"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty" tf:"computed"` + EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty" tf:"computed"` + EbsVolumeCount int32 `json:"ebs_volume_count,omitempty" tf:"computed"` + EbsVolumeSize int32 `json:"ebs_volume_size,omitempty" tf:"computed"` +} + +// AzureAttributes encapsulates the Azure attributes for Azure based clusters +// https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#clusterazureattributes +type AzureAttributes struct { + FirstOnDemand int32 `json:"first_on_demand,omitempty" tf:"computed"` + Availability Availability `json:"availability,omitempty" tf:"computed"` + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty" tf:"computed"` +} + +// GcpAttributes encapsultes GCP specific attributes +// https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#clustergcpattributes +type GcpAttributes struct { + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty" tf:"computed"` + GoogleServiceAccount string `json:"google_service_account,omitempty" tf:"computed"` } // DbfsStorageInfo contains the destination string for DBFS @@ -139,12 +165,24 @@ type S3StorageInfo struct { CannedACL string `json:"canned_acl,omitempty"` } +// LocalFileInfo represents a local file on disk, e.g. in a customer's container. +type LocalFileInfo struct { + Destination string `json:"destination,omitempty" tf:"optional"` +} + // StorageInfo contains the struct for either DBFS or S3 storage depending on which one is relevant. type StorageInfo struct { Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"` S3 *S3StorageInfo `json:"s3,omitempty" tf:"group:storage"` } +// InitScriptStorageInfo captures the allowed sources of init scripts. 
+type InitScriptStorageInfo struct { + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"` + S3 *S3StorageInfo `json:"s3,omitempty" tf:"group:storage"` + File *LocalFileInfo `json:"file,omitempty" tf:"optional"` +} + // SparkNodeAwsAttributes is the struct that determines if the node is a spot instance or not type SparkNodeAwsAttributes struct { IsSpot bool `json:"is_spot,omitempty"` @@ -233,21 +271,23 @@ type Cluster struct { EnableElasticDisk bool `json:"enable_elastic_disk,omitempty" tf:"computed"` EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` - NodeTypeID string `json:"node_type_id,omitempty" tf:"group:node_type,computed"` - DriverNodeTypeID string `json:"driver_node_type_id,omitempty" tf:"conflicts:instance_pool_id,computed"` - InstancePoolID string `json:"instance_pool_id,omitempty" tf:"group:node_type"` - PolicyID string `json:"policy_id,omitempty"` - AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id"` - AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty" tf:"group:node_type,computed"` + DriverNodeTypeID string `json:"driver_node_type_id,omitempty" tf:"conflicts:instance_pool_id,computed"` + InstancePoolID string `json:"instance_pool_id,omitempty" tf:"group:node_type"` + PolicyID string `json:"policy_id,omitempty"` + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id"` + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty" tf:"conflicts:instance_pool_id"` + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty" tf:"conflicts:instance_pool_id"` + AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` SparkConf map[string]string `json:"spark_conf,omitempty"` SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` - SSHPublicKeys []string `json:"ssh_public_keys,omitempty" tf:"max_items:10"` - InitScripts []StorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"` // TODO: tf:alias - ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` - DockerImage *DockerImage `json:"docker_image,omitempty"` + SSHPublicKeys []string `json:"ssh_public_keys,omitempty" tf:"max_items:10"` + InitScripts []InitScriptStorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"` // TODO: tf:alias + ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` + DockerImage *DockerImage `json:"docker_image,omitempty"` SingleUserName string `json:"single_user_name,omitempty"` IdempotencyToken string `json:"idempotency_token,omitempty"` @@ -272,6 +312,8 @@ type ClusterInfo struct { SparkVersion string `json:"spark_version"` SparkConf map[string]string `json:"spark_conf,omitempty"` AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` NodeTypeID string `json:"node_type_id,omitempty"` DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` @@ -285,7 +327,7 @@ type ClusterInfo struct { InstancePoolID string `json:"instance_pool_id,omitempty"` PolicyID string `json:"policy_id,omitempty"` SingleUserName string `json:"single_user_name,omitempty"` - ClusterSource AwsAvailability `json:"cluster_source,omitempty"` + ClusterSource Availability `json:"cluster_source,omitempty"` DockerImage 
*DockerImage `json:"docker_image,omitempty"` State ClusterState `json:"state"` StateMessage string `json:"state_message,omitempty"` @@ -344,9 +386,16 @@ type Command struct { // InstancePoolAwsAttributes contains aws attributes for AWS Databricks deployments for instance pools type InstancePoolAwsAttributes struct { - Availability AwsAvailability `json:"availability,omitempty"` - ZoneID string `json:"zone_id"` - SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` + Availability Availability `json:"availability,omitempty"` + ZoneID string `json:"zone_id"` + SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` +} + +// InstancePoolAzureAttributes contains aws attributes for Azure Databricks deployments for instance pools +// https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/instance-pools#clusterinstancepoolazureattributes +type InstancePoolAzureAttributes struct { + Availability Availability `json:"availability,omitempty"` + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty" tf:"computed"` } // InstancePoolDiskType contains disk type information for each of the different cloud service providers @@ -364,17 +413,18 @@ type InstancePoolDiskSpec struct { // InstancePool describes the instance pool object on Databricks type InstancePool struct { - InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` - InstancePoolName string `json:"instance_pool_name"` - MinIdleInstances int32 `json:"min_idle_instances,omitempty"` - MaxCapacity int32 `json:"max_capacity,omitempty"` - IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` - AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` - NodeTypeID string `json:"node_type_id"` - CustomTags map[string]string `json:"custom_tags,omitempty"` - EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` - DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` - PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` + InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` + InstancePoolName string `json:"instance_pool_name"` + MinIdleInstances int32 `json:"min_idle_instances,omitempty"` + MaxCapacity int32 `json:"max_capacity,omitempty"` + IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"` + NodeTypeID string `json:"node_type_id"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` + PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` } // InstancePoolStats contains the stats on a given pool @@ -387,20 +437,21 @@ type InstancePoolStats struct { // InstancePoolAndStats encapsulates a get response from the GET api for instance pools on Databricks type InstancePoolAndStats struct { - InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` - InstancePoolName string `json:"instance_pool_name"` - MinIdleInstances int32 `json:"min_idle_instances,omitempty"` - MaxCapacity int32 `json:"max_capacity,omitempty"` - AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` - NodeTypeID string `json:"node_type_id"` - DefaultTags map[string]string `json:"default_tags,omitempty" tf:"computed"` - CustomTags 
map[string]string `json:"custom_tags,omitempty"` - IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` - EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` - DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` - PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` - State string `json:"state,omitempty"` - Stats *InstancePoolStats `json:"stats,omitempty"` + InstancePoolID string `json:"instance_pool_id,omitempty" tf:"computed"` + InstancePoolName string `json:"instance_pool_name"` + MinIdleInstances int32 `json:"min_idle_instances,omitempty"` + MaxCapacity int32 `json:"max_capacity,omitempty"` + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"` + NodeTypeID string `json:"node_type_id"` + DefaultTags map[string]string `json:"default_tags,omitempty" tf:"computed"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` + PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` + State string `json:"state,omitempty"` + Stats *InstancePoolStats `json:"stats,omitempty"` } // InstancePoolList shows list of instance pools diff --git a/compute/resource_cluster.go b/compute/resource_cluster.go index 1b524a3849..8cd9dd127e 100644 --- a/compute/resource_cluster.go +++ b/compute/resource_cluster.go @@ -40,17 +40,19 @@ func ResourceCluster() *schema.Resource { }.ToResource() } +func sparkConfDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool { + isPossiblyLegacyConfig := "spark_conf.%" == k && "1" == old && "0" == new + isLegacyConfig := "spark_conf.spark.databricks.delta.preview.enabled" == k + if isPossiblyLegacyConfig || isLegacyConfig { + log.Printf("[DEBUG] Suppressing diff for k=%#v old=%#v new=%#v", k, old, new) + return true + } + return false +} + func resourceClusterSchema() map[string]*schema.Schema { return common.StructToSchema(Cluster{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { - s["spark_conf"].DiffSuppressFunc = func(k, old, new string, d *schema.ResourceData) bool { - isPossiblyLegacyConfig := "spark_conf.%" == k && "1" == old && "0" == new - isLegacyConfig := "spark_conf.spark.databricks.delta.preview.enabled" == k - if isPossiblyLegacyConfig || isLegacyConfig { - log.Printf("[DEBUG] Suppressing diff for k=%#v old=%#v new=%#v", k, old, new) - return true - } - return false - } + s["spark_conf"].DiffSuppressFunc = sparkConfDiffSuppressFunc // adds `libraries` configuration block s["library"] = common.StructToSchema(ClusterLibraryList{}, func(ss map[string]*schema.Schema) map[string]*schema.Schema { @@ -68,6 +70,9 @@ func resourceClusterSchema() map[string]*schema.Schema { Optional: true, Computed: true, } + s["aws_attributes"].ConflictsWith = []string{"azure_attributes", "gcp_attributes"} + s["azure_attributes"].ConflictsWith = []string{"aws_attributes", "gcp_attributes"} + s["gcp_attributes"].ConflictsWith = []string{"aws_attributes", "azure_attributes"} s["is_pinned"] = &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -321,6 +326,15 @@ func modifyClusterRequest(clusterModel *Cluster) { } clusterModel.AwsAttributes = &awsAttributes } + if clusterModel.AzureAttributes != nil { + clusterModel.AzureAttributes = nil + } + if 
clusterModel.GcpAttributes != nil { + gcpAttributes := GcpAttributes{ + GoogleServiceAccount: clusterModel.GcpAttributes.GoogleServiceAccount, + } + clusterModel.GcpAttributes = &gcpAttributes + } clusterModel.EnableElasticDisk = false clusterModel.NodeTypeID = "" clusterModel.DriverNodeTypeID = "" diff --git a/compute/resource_cluster_test.go b/compute/resource_cluster_test.go index e21f9cfb8a..ec423137e9 100644 --- a/compute/resource_cluster_test.go +++ b/compute/resource_cluster_test.go @@ -1055,7 +1055,7 @@ func TestResourceClusterUpdate_FailNumWorkersZero(t *testing.T) { require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) } -func TestModifyClusterRequest(t *testing.T) { +func TestModifyClusterRequestAws(t *testing.T) { c := Cluster{ InstancePoolID: "a", AwsAttributes: &AwsAttributes{ @@ -1072,3 +1072,37 @@ func TestModifyClusterRequest(t *testing.T) { assert.Equal(t, "", c.DriverNodeTypeID) assert.Equal(t, false, c.EnableElasticDisk) } + +func TestModifyClusterRequestAzure(t *testing.T) { + c := Cluster{ + InstancePoolID: "a", + AzureAttributes: &AzureAttributes{ + FirstOnDemand: 1, + }, + EnableElasticDisk: true, + NodeTypeID: "d", + DriverNodeTypeID: "e", + } + modifyClusterRequest(&c) + assert.Nil(t, c.AzureAttributes) + assert.Equal(t, "", c.NodeTypeID) + assert.Equal(t, "", c.DriverNodeTypeID) + assert.Equal(t, false, c.EnableElasticDisk) +} + +func TestModifyClusterRequestGcp(t *testing.T) { + c := Cluster{ + InstancePoolID: "a", + GcpAttributes: &GcpAttributes{ + UsePreemptibleExecutors: true, + }, + EnableElasticDisk: true, + NodeTypeID: "d", + DriverNodeTypeID: "e", + } + modifyClusterRequest(&c) + assert.Equal(t, false, c.GcpAttributes.UsePreemptibleExecutors) + assert.Equal(t, "", c.NodeTypeID) + assert.Equal(t, "", c.DriverNodeTypeID) + assert.Equal(t, false, c.EnableElasticDisk) +} diff --git a/compute/resource_instance_pool.go b/compute/resource_instance_pool.go index 1f09ff43c1..382be51631 100644 --- a/compute/resource_instance_pool.go +++ b/compute/resource_instance_pool.go @@ -61,6 +61,8 @@ func ResourceInstancePool() *schema.Resource { s["custom_tags"].ForceNew = true s["enable_elastic_disk"].ForceNew = true s["enable_elastic_disk"].Default = true + s["aws_attributes"].ConflictsWith = []string{"azure_attributes"} + s["azure_attributes"].ConflictsWith = []string{"aws_attributes"} // TODO: check if it's really force new... 
if v, err := common.SchemaPath(s, "aws_attributes", "availability"); err == nil { v.ForceNew = true @@ -71,6 +73,12 @@ func ResourceInstancePool() *schema.Resource { if v, err := common.SchemaPath(s, "aws_attributes", "spot_bid_price_percent"); err == nil { v.ForceNew = true } + if v, err := common.SchemaPath(s, "azure_attributes", "availability"); err == nil { + v.ForceNew = true + } + if v, err := common.SchemaPath(s, "azure_attributes", "spot_bid_max_price"); err == nil { + v.ForceNew = true + } if v, err := common.SchemaPath(s, "disk_spec", "disk_type", "azure_disk_volume_type"); err == nil { v.ForceNew = true // nolint diff --git a/compute/resource_job.go b/compute/resource_job.go index 0d0e85b1d3..be5c0b2d23 100644 --- a/compute/resource_job.go +++ b/compute/resource_job.go @@ -115,6 +115,10 @@ var jobSchema = common.StructToSchema(JobSettings{}, p.Required = false } + if p, err := common.SchemaPath(s, "schedule", "pause_status"); err == nil { + p.ValidateFunc = validation.StringInSlice([]string{"PAUSED", "UNPAUSED"}, false) + } + if v, err := common.SchemaPath(s, "new_cluster", "spark_conf"); err == nil { v.DiffSuppressFunc = func(k, old, new string, d *schema.ResourceData) bool { isPossiblyLegacyConfig := "new_cluster.0.spark_conf.%" == k && "1" == old && "0" == new diff --git a/compute/resource_job_test.go b/compute/resource_job_test.go index 8a4fff291b..153878d954 100644 --- a/compute/resource_job_test.go +++ b/compute/resource_job_test.go @@ -31,6 +31,11 @@ func TestResourceJobCreate(t *testing.T) { Jar: "dbfs://aa/bb/cc.jar", }, }, + Schedule: &CronSchedule{ + QuartzCronExpression: "0 15 22 ? * *", + TimezoneID: "America/Los_Angeles", + PauseStatus: "PAUSED", + }, Name: "Featurizer", MaxRetries: 3, MinRetryIntervalMillis: 5000, @@ -64,6 +69,11 @@ func TestResourceJobCreate(t *testing.T) { MinRetryIntervalMillis: 5000, RetryOnTimeout: true, MaxConcurrentRuns: 1, + Schedule: &CronSchedule{ + QuartzCronExpression: "0 15 22 ? * *", + TimezoneID: "America/Los_Angeles", + PauseStatus: "PAUSED", + }, }, }, }, @@ -76,7 +86,11 @@ func TestResourceJobCreate(t *testing.T) { min_retry_interval_millis = 5000 name = "Featurizer" retry_on_timeout = true - + schedule { + quartz_cron_expression = "0 15 22 ? * *" + timezone_id = "America/Los_Angeles" + pause_status = "PAUSED" + } spark_jar_task { main_class_name = "com.labs.BarMain" } diff --git a/compute/resource_pipeline.go b/compute/resource_pipeline.go new file mode 100644 index 0000000000..38273f6a0f --- /dev/null +++ b/compute/resource_pipeline.go @@ -0,0 +1,252 @@ +package compute + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/databrickslabs/terraform-provider-databricks/common" +) + +// DefaultTimeout is the default amount of time that Terraform will wait when creating, updating and deleting pipelines. +const DefaultTimeout = 20 * time.Minute + +// We separate this struct from Cluster for two reasons: +// 1. Pipeline clusters include a `Label` field. +// 2. Spark version is not required (and shouldn't be specified) for pipeline clusters. +// 3. num_workers is optional, and there is no single-node support for pipelines clusters. 
+type pipelineCluster struct { + Label string `json:"label,omitempty"` // used only by pipelines + + NumWorkers int32 `json:"num_workers,omitempty" tf:"group:size"` + Autoscale *AutoScale `json:"autoscale,omitempty" tf:"group:size"` + + NodeTypeID string `json:"node_type_id,omitempty" tf:"group:node_type,computed"` + DriverNodeTypeID string `json:"driver_node_type_id,omitempty" tf:"conflicts:instance_pool_id,computed"` + InstancePoolID string `json:"instance_pool_id,omitempty" tf:"group:node_type"` + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id"` + + SparkConf map[string]string `json:"spark_conf,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + + SSHPublicKeys []string `json:"ssh_public_keys,omitempty" tf:"max_items:10"` + InitScripts []InitScriptStorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"` // TODO: tf:alias + ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` +} + +type notebookLibrary struct { + Path string `json:"path"` +} + +type pipelineLibrary struct { + Jar string `json:"jar,omitempty"` + Maven *Maven `json:"maven,omitempty"` + Whl string `json:"whl,omitempty"` + Notebook *notebookLibrary `json:"notebook,omitempty"` +} + +type filters struct { + Include []string `json:"include,omitempty"` + Exclude []string `json:"exclude,omitempty"` +} + +type pipelineSpec struct { + ID string `json:"id,omitempty" tf:"computed"` + Name string `json:"name,omitempty"` + Storage string `json:"storage,omitempty"` + Configuration map[string]string `json:"configuration,omitempty"` + Clusters []pipelineCluster `json:"clusters,omitempty"` + Libraries []pipelineLibrary `json:"libraries,omitempty"` + Filters *filters `json:"filters"` + Continuous bool `json:"continuous,omitempty"` + AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` +} + +type createPipelineResponse struct { + PipelineID string `json:"pipeline_id"` +} + +// PipelineState ... +type PipelineState string + +// Constants for PipelineStates +const ( + StateDeploying PipelineState = "DEPLOYING" + StateStarting PipelineState = "STARTING" + StateRunning PipelineState = "RUNNING" + StateStopping PipelineState = "STOPPPING" + StateDeleted PipelineState = "DELETED" + StateRecovering PipelineState = "RECOVERING" + StateFailed PipelineState = "FAILED" + StateResetting PipelineState = "RESETTING" + StateIdle PipelineState = "IDLE" +) + +// PipelineHealthStatus ... 
+type PipelineHealthStatus string + +// Constants for PipelineHealthStatus +const ( + HealthStatusHealthy PipelineHealthStatus = "HEALTHY" + HealthStatusUnhealthy PipelineHealthStatus = "UNHEALTHY" +) + +type pipelineInfo struct { + PipelineID string `json:"pipeline_id"` + Spec *pipelineSpec `json:"spec"` + State *PipelineState `json:"state"` + Cause string `json:"cause"` + ClusterID string `json:"cluster_id"` + Name string `json:"name"` + Health *PipelineHealthStatus `json:"health"` +} + +type pipelinesAPI struct { + client *common.DatabricksClient + ctx context.Context +} + +func newPipelinesAPI(ctx context.Context, m interface{}) pipelinesAPI { + return pipelinesAPI{m.(*common.DatabricksClient), ctx} +} + +func (a pipelinesAPI) create(s pipelineSpec, timeout time.Duration) (string, error) { + var resp createPipelineResponse + err := a.client.Post(a.ctx, "/pipelines", s, &resp) + if err != nil { + return "", err + } + id := resp.PipelineID + err = a.waitForState(id, timeout, StateRunning) + if err != nil { + log.Printf("[INFO] Pipeline creation failed, attempting to clean up pipeline %s", id) + err2 := a.delete(id, timeout) + if err2 != nil { + log.Printf("[WARN] Unable to delete pipeline %s; this resource needs to be manually cleaned up", id) + return "", fmt.Errorf("Multiple errors occurred when creating pipeline. Error while waiting for creation: \"%v\"; error while attempting to clean up failed pipeline: \"%v\"", err, err2) + } + log.Printf("[INFO] Successfully cleaned up pipeline %s", id) + return "", err + } + return id, nil +} + +func (a pipelinesAPI) read(id string) (p pipelineInfo, err error) { + err = a.client.Get(a.ctx, "/pipelines/"+id, nil, &p) + return +} + +func (a pipelinesAPI) update(id string, s pipelineSpec, timeout time.Duration) error { + err := a.client.Put(a.ctx, "/pipelines/"+id, s) + if err != nil { + return err + } + return a.waitForState(id, timeout, StateRunning) +} + +func (a pipelinesAPI) delete(id string, timeout time.Duration) error { + err := a.client.Delete(a.ctx, "/pipelines/"+id, map[string]string{}) + if err != nil { + return err + } + return resource.RetryContext(a.ctx, timeout, + func() *resource.RetryError { + i, err := a.read(id) + if err != nil { + if e, ok := err.(common.APIError); ok && e.IsMissing() { + return nil + } + return resource.NonRetryableError(err) + } + message := fmt.Sprintf("Pipeline %s is in state %s, not yet deleted", id, *i.State) + log.Printf("[DEBUG] %s", message) + return resource.RetryableError(fmt.Errorf(message)) + }) +} + +func (a pipelinesAPI) waitForState(id string, timeout time.Duration, desiredState PipelineState) error { + return resource.RetryContext(a.ctx, timeout, + func() *resource.RetryError { + i, err := a.read(id) + if err != nil { + return resource.NonRetryableError(err) + } + state := *i.State + if state == desiredState { + return nil + } + if state == StateFailed { + return resource.NonRetryableError(fmt.Errorf("Pipeline %s has failed", id)) + } + message := fmt.Sprintf("Pipeline %s is in state %s, not yet in state %s", id, state, desiredState) + log.Printf("[DEBUG] %s", message) + return resource.RetryableError(fmt.Errorf(message)) + }) +} + +func adjustPipelineResourceSchema(m map[string]*schema.Schema) map[string]*schema.Schema { + clusters, _ := m["clusters"].Elem.(*schema.Resource) + clustersSchema := clusters.Schema + clustersSchema["spark_conf"].DiffSuppressFunc = sparkConfDiffSuppressFunc + + awsAttributes, _ := clustersSchema["aws_attributes"].Elem.(*schema.Resource) + awsAttributesSchema := 
awsAttributes.Schema + delete(awsAttributesSchema, "first_on_demand") + delete(awsAttributesSchema, "availability") + delete(awsAttributesSchema, "spot_bid_price_percent") + delete(awsAttributesSchema, "ebs_volume_type") + delete(awsAttributesSchema, "ebs_volume_count") + delete(awsAttributesSchema, "ebs_volume_size") + + m["libraries"].MinItems = 1 + + return m +} + +// ResourcePipeline defines the Terraform resource for pipelines. +func ResourcePipeline() *schema.Resource { + var pipelineSchema = common.StructToSchema(pipelineSpec{}, adjustPipelineResourceSchema) + return common.Resource{ + Schema: pipelineSchema, + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var s pipelineSpec + err := common.DataToStructPointer(d, pipelineSchema, &s) + if err != nil { + return err + } + api := newPipelinesAPI(ctx, c) + id, err := api.create(s, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + d.SetId(id) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + i, err := newPipelinesAPI(ctx, c).read(d.Id()) + if err != nil { + return err + } + return common.StructToData(*i.Spec, pipelineSchema, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var s pipelineSpec + if err := common.DataToStructPointer(d, pipelineSchema, &s); err != nil { + return err + } + return newPipelinesAPI(ctx, c).update(d.Id(), s, d.Timeout(schema.TimeoutUpdate)) + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + api := newPipelinesAPI(ctx, c) + return api.delete(d.Id(), d.Timeout(schema.TimeoutDelete)) + }, + Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(DefaultTimeout), + }, + }.ToResource() +} diff --git a/compute/resource_pipeline_test.go b/compute/resource_pipeline_test.go new file mode 100644 index 0000000000..14b959384f --- /dev/null +++ b/compute/resource_pipeline_test.go @@ -0,0 +1,511 @@ +package compute + +import ( + "testing" + + "github.com/databrickslabs/terraform-provider-databricks/common" + + "github.com/databrickslabs/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" +) + +var basicPipelineSpec = pipelineSpec{ + Name: "test-pipeline", + Storage: "/test/storage", + Configuration: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + Clusters: []pipelineCluster{ + { + Label: "default", + CustomTags: map[string]string{ + "cluster_tag1": "cluster_value1", + }, + }, + }, + Libraries: []pipelineLibrary{ + { + Jar: "dbfs:/pipelines/code/abcde.jar", + }, + { + Maven: &Maven{ + Coordinates: "com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.18", + }, + }, + }, + Filters: &filters{ + Include: []string{"com.databricks.include"}, + Exclude: []string{"com.databricks.exclude"}, + }, +} + +func TestResourcePipelineCreate(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/pipelines", + ExpectedRequest: basicPipelineSpec, + Response: createPipelineResponse{ + PipelineID: "abcd", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "name": "test-pipeline", + "state": "DEPLOYING", + "spec": basicPipelineSpec, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "name": "test-pipeline", + "state": "RUNNING", + "spec": 
basicPipelineSpec, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "name": "test-pipeline", + "state": "RUNNING", + "spec": basicPipelineSpec, + }, + }, + }, + Create: true, + Resource: ResourcePipeline(), + HCL: `name = "test-pipeline" + storage = "/test/storage" + configuration = { + key1 = "value1" + key2 = "value2" + } + clusters { + label = "default" + custom_tags = { + "cluster_tag1" = "cluster_value1" + } + } + libraries { + jar = "dbfs:/pipelines/code/abcde.jar" + } + libraries { + maven { + coordinates = "com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.18" + } + } + filters { + include = ["com.databricks.include"] + exclude = ["com.databricks.exclude"] + } + continuous = false + `, + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abcd", d.Id()) +} + +func TestResourcePipelineCreate_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/pipelines", + Response: common.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + jar = "jar" + } + filters { + include = ["a"] + } + `, + Create: true, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "", d.Id(), "Id should be empty for error creates") +} + +func TestResourcePipelineCreate_ErrorWhenWaitingFailedCleanup(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/pipelines", + Response: createPipelineResponse{ + PipelineID: "abcd", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "name": "test-pipeline", + "state": "FAILED", + }, + }, + { + Method: "DELETE", + Resource: "/api/2.0/pipelines/abcd", + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "INTERNAL_ERROR", + Message: "Internal error", + }, + Status: 500, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + jar = "jar" + } + filters { + include = ["a"] + } + `, + Create: true, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Multiple errors occurred when creating pipeline. 
Error while waiting for creation: \"Pipeline abcd has failed\"; error while attempting to clean up failed pipeline: \"Internal error\"") + assert.Equal(t, "", d.Id(), "Id should be empty for error creates") +} + +func TestResourcePipelineCreate_ErrorWhenWaitingSuccessfulCleanup(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/pipelines", + Response: createPipelineResponse{ + PipelineID: "abcd", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: map[string]interface{}{ + "id": "abcd", + "name": "test-pipeline", + "state": "FAILED", + }, + }, + { + Method: "DELETE", + Resource: "/api/2.0/pipelines/abcd", + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "No such resource", + }, + Status: 404, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + jar = "jar" + } + filters { + include = ["a"] + } + `, + Create: true, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Pipeline abcd has failed") + assert.Equal(t, "", d.Id(), "Id should be empty for error creates") +} + +func TestResourcePipelineRead(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: pipelineInfo{ + PipelineID: "abcd", + Spec: &basicPipelineSpec, + }, + }, + }, + Resource: ResourcePipeline(), + Read: true, + New: true, + ID: "abcd", + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abcd", d.Id(), "Id should not be empty") + assert.Equal(t, "/test/storage", d.Get("storage")) + assert.Equal(t, "value1", d.Get("configuration.key1")) + assert.Equal(t, "cluster_value1", d.Get("clusters.0.custom_tags.cluster_tag1")) + assert.Equal(t, "com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.18", d.Get("libraries.1.maven.0.coordinates")) + assert.Equal(t, "com.databricks.include", d.Get("filters.0.include.0")) + assert.Equal(t, false, d.Get("continuous")) +} + +func TestResourcePipelineRead_NotFound(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "NOT_FOUND", + Message: "Item not found", + }, + Status: 404, + }, + }, + Resource: ResourcePipeline(), + Read: true, + Removed: true, + ID: "abcd", + }.ApplyNoError(t) +} + +func TestResourcePipelineRead_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourcePipeline(), + Read: true, + ID: "abcd", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "abcd", d.Id(), "Id should not be empty for error reads") +} + +func TestResourcePipelineUpdate(t *testing.T) { + state := StateRunning + spec := pipelineSpec{ + ID: "abcd", + Name: "test", + Storage: "/test/storage", + Libraries: []pipelineLibrary{ + { + Maven: &Maven{ + Coordinates: "coordinates", + }, + }, + }, + Filters: &filters{ + Include: []string{"com.databricks.include"}, + }, + } + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PUT", + Resource: "/api/2.0/pipelines/abcd", + ExpectedRequest: spec, + }, + { + Method: "GET", + Resource: 
"/api/2.0/pipelines/abcd", + Response: pipelineInfo{ + PipelineID: "abcd", + Spec: &spec, + State: &state, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: pipelineInfo{ + PipelineID: "abcd", + Spec: &spec, + State: &state, + }, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + maven { + coordinates = "coordinates" + } + } + filters { + include = [ "com.databricks.include" ] + }`, + Update: true, + ID: "abcd", + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abcd", d.Id(), "Id should be the same as in reading") +} + +func TestResourcePipelineUpdate_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { // read log output for better stub url... + Method: "PUT", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + maven { + coordinates = "coordinates" + } + } + filters { + include = [ "com.databricks.include" ] + }`, + Update: true, + ID: "abcd", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "abcd", d.Id()) +} + +func TestResourcePipelineUpdate_FailsAfterUpdate(t *testing.T) { + state := StateFailed + spec := pipelineSpec{ + ID: "abcd", + Name: "test", + Storage: "/test/storage", + Libraries: []pipelineLibrary{ + { + Maven: &Maven{ + Coordinates: "coordinates", + }, + }, + }, + Filters: &filters{ + Include: []string{"com.databricks.include"}, + }, + } + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PUT", + Resource: "/api/2.0/pipelines/abcd", + ExpectedRequest: spec, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: pipelineInfo{ + PipelineID: "abcd", + Spec: &spec, + State: &state, + }, + }, + }, + Resource: ResourcePipeline(), + HCL: `name = "test" + storage = "/test/storage" + libraries { + maven { + coordinates = "coordinates" + } + } + filters { + include = [ "com.databricks.include" ] + }`, + Update: true, + ID: "abcd", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Pipeline abcd has failed") + assert.Equal(t, "abcd", d.Id(), "Id should be the same as in reading") +} + +func TestResourcePipelineDelete(t *testing.T) { + state := StateRunning + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "DELETE", + Resource: "/api/2.0/pipelines/abcd", + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: pipelineInfo{ + PipelineID: "abcd", + Spec: &basicPipelineSpec, + State: &state, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "No such resource", + }, + Status: 404, + }, + }, + Resource: ResourcePipeline(), + Delete: true, + ID: "abcd", + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abcd", d.Id()) +} + +func TestResourcePipelineDelete_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "DELETE", + Resource: "/api/2.0/pipelines/abcd", + Response: common.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 500, + }, + }, + Resource: ResourcePipeline(), + Delete: true, + ID: "abcd", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "abcd", d.Id()) 
+}
diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 7874a5cb1c..02c6aa8f56 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -54,7 +54,7 @@ terraform {
   required_providers {
     databricks = {
       source = "databrickslabs/databricks"
-      version = "0.3.1"
+      version = "0.3.2"
     }
   }
 }
@@ -177,8 +177,9 @@ data "databricks_aws_bucket_policy" "this" {
 }

 resource "aws_s3_bucket_policy" "root_bucket_policy" {
-  bucket = aws_s3_bucket.root_storage_bucket.id
-  policy = data.databricks_aws_bucket_policy.this.json
+  bucket     = aws_s3_bucket.root_storage_bucket.id
+  policy     = data.databricks_aws_bucket_policy.this.json
+  depends_on = [aws_s3_bucket_public_access_block.root_storage_bucket]
 }

 resource "databricks_mws_storage_configurations" "this" {
diff --git a/docs/guides/workspace-management.md b/docs/guides/workspace-management.md index 477704efc9..82ae6a33b8 100644 --- a/docs/guides/workspace-management.md +++ b/docs/guides/workspace-management.md @@ -11,7 +11,7 @@ terraform {
   required_providers {
     databricks = {
       source = "databrickslabs/databricks"
-      version = "0.3.1"
+      version = "0.3.2"
     }
   }
 }
diff --git a/docs/index.md b/docs/index.md index 16509d2275..69d0c3209b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -239,10 +239,18 @@ resource "databricks_user" "my-user" {
 resides. Alternatively, you can provide this value as an environment variable `DATABRICKS_AZURE_TENANT_ID` or `ARM_TENANT_ID`.
 * `azure_environment` - (optional) This is the Azure Environment which defaults to the `public` cloud. Other options are `german`, `china` and `usgovernment`. Alternatively, you can provide this value as an environment variable `ARM_ENVIRONMENT`.
 * `pat_token_duration_seconds` - The current implementation of the azure auth via sp requires the provider to create a temporary personal access token within Databricks. The current AAD implementation does not cover all the APIs for Authentication. This field determines the duration in which that temporary PAT token is alive. It is measured in seconds and will default to `3600` seconds.
+
+There are multiple environment variable options, the `DATABRICKS_AZURE_*` environment variables take precedence, and the `ARM_*` environment variables provide a way to share authentication configuration using the `databricks` provider alongside the `azurerm` provider.
+
+## Miscellaneous configuration parameters
+
+This section covers configuration parameters not related to authentication. They can be used when debugging problems or for additional tuning of the provider's behaviour; a combined example follows the list:
+
+* `rate_limit` - defines the maximum number of requests per second made to the Databricks REST API by Terraform. Default is *15*.
 * `debug_truncate_bytes` - Applicable only when `TF_LOG=DEBUG` is set. Truncate JSON fields in HTTP requests and responses above this limit. Default is *96*.
 * `debug_headers` - Applicable only when `TF_LOG=DEBUG` is set. Debug HTTP headers of requests made by the provider. Default is *false*. We recommend to turn this flag on only under exceptional circumstances, when troubleshooting authentication issues. Turning this flag on will log first `debug_truncate_bytes` of any HTTP header value in cleartext.
+* `skip_verify` - skips SSL certificate verification for HTTP calls. *Use at your own risk.* Default is *false* (don't skip verification).
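+
+For illustration, the options above can be combined in a provider block like the following sketch; the values are arbitrary examples, not recommendations, and the authentication arguments are omitted:
+
+```hcl
+provider "databricks" {
+  rate_limit           = 10
+  debug_truncate_bytes = 250
+}
+```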
-There are multiple environment variable options, the `DATABRICKS_AZURE_*` environment variables take precedence, and the `ARM_*` environment variables provide a way to share authentication configuration using the `databricks` provider alongside the `azurerm` provider. ## Environment variables @@ -266,6 +274,8 @@ The following configuration attributes can be passed via environment variables: | `azure_environment` | `ARM_ENVIRONMENT` | | `debug_truncate_bytes` | `DATABRICKS_DEBUG_TRUNCATE_BYTES` | | `debug_headers` | `DATABRICKS_DEBUG_HEADERS` | +| `rate_limit` | `DATABRICKS_RATE_LIMIT` | + ## Empty provider block diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index fea17a1309..72ad125cef 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -196,7 +196,7 @@ cluster_log_conf { There are a few more advanced attributes for S3 log delivery: -* `destination` - S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. +* `destination` - S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. * `region` - (Optional) S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used. * `endpoint` - (Optional) S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used. * `enable_encryption` - (Optional) Enable server-side encryption, false by default. @@ -227,7 +227,15 @@ init_scripts { } ``` -Attributes are the same as for the `cluster_log_conf` configuration block. +Like the `cluster_log_conf` configuration block, init scripts support S3 and DBFS locations. In addition, you can also specify a local file as follows: + +```hcl +init_scripts { + file { + destination = "file:/my/local/file.sh" + } +} +``` ## aws_attributes @@ -259,7 +267,7 @@ resource "databricks_cluster" "this" { The following options are available: * `zone_id` - (Required) Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like “us-west-2a”. The provided availability zone must be in the same region as the Databricks deployment. For example, “us-west-2a” is not a valid zone ID if the Databricks deployment resides in the “us-east-1” region. -* `availability` - (Optional) Availability type used for all subsequent nodes past the `first_on_demand` ones. Valid values are `SPOT` and `ON_DEMAND`. Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster. +* `availability` - (Optional) Availability type used for all subsequent nodes past the `first_on_demand` ones. Valid values are `SPOT`, `SPOT_WITH_FALLBACK` and `ON_DEMAND`. Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster. * `first_on_demand` - (Optional) The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. 
If this value is less than the current cluster size, `first_on_demand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. * `spot_bid_price_percent` - (Optional) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new `i3.xlarge` spot instance, then the max price is half of the price of on-demand `i3.xlarge` instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand `i3.xlarge` instances. If not specified, the default value is `100`. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than `10000`. * `instance_profile_arn` - (Optional) Nodes for this cluster will only be placed on AWS instances with this instance profile. Please see [databricks_instance_profile](instance_profile.md) resource documentation for extended examples on adding a valid instance profile using Terraform. @@ -267,6 +275,49 @@ The following options are available: * `ebs_volume_count` - (Optional) The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, and etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, and etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden. * `ebs_volume_size` - (Optional) The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized). +## azure_attributes + +`azure_attributes` optional configuration block contains attributes related to [clusters running on Azure](https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#--azureattributes). + +-> **Note** *(Azure only)* Please specify empty configuration block (`azure_attributes {}`), even if you're not setting any custom values. This will prevent any resource update issues. 
+
+Here is an example of a shared autoscaling cluster with some of the Azure options set:
+
+```hcl
+resource "databricks_cluster" "this" {
+  cluster_name            = "Shared Autoscaling"
+  spark_version           = "6.6.x-scala2.11"
+  node_type_id            = "Standard_DS3_v2"
+  autotermination_minutes = 20
+  autoscale {
+    min_workers = 1
+    max_workers = 50
+  }
+  azure_attributes {
+    availability       = "SPOT_AZURE"
+    first_on_demand    = 1
+    spot_bid_max_price = 100
+  }
+}
+```
+
+The following options are [available](https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#--azureattributes):
+
+* `availability` - (Optional) Availability type used for all subsequent nodes past the `first_on_demand` ones. Valid values are `SPOT_AZURE`, `SPOT_WITH_FALLBACK`, and `ON_DEMAND_AZURE`. Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster.
+* `first_on_demand` - (Optional) The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `first_on_demand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
+* `spot_bid_max_price` - (Optional) The max price for Azure spot instances. Use `-1` to specify the lowest price.
+
+## gcp_attributes
+
+`gcp_attributes` optional configuration block contains attributes related to [clusters running on GCP](https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#clustergcpattributes).
+
+-> **Note** *(GCP only)* Please specify empty configuration block (`gcp_attributes {}`), even if you're not setting any custom values. This will prevent any resource update issues.
+
+The following options are available:
+
+* `use_preemptible_executors` - (Optional, bool) whether to use preemptible executors ([GCP documentation](https://cloud.google.com/compute/docs/instances/preemptible))
+* `google_service_account` - (Optional, string) Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
+
 ## docker_image
 
 [Databricks Container Services](https://docs.databricks.com/clusters/custom-containers.html) lets you specify a Docker image when you create a cluster. You need to enable Container Services on the *Admin Console / Advanced* page in the user interface. By enabling this feature, you acknowledge and agree that your usage of this feature is subject to the [applicable additional terms](http://www.databricks.com/product-specific-terms).
diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md
index e4b2d5dc3d..b4d4f9198b 100644
--- a/docs/resources/instance_pool.md
+++ b/docs/resources/instance_pool.md
@@ -56,6 +56,17 @@ The following arguments are required:
 
 * `availability` - (Optional) (String) Availability type used for all instances in the pool. Only `ON_DEMAND` and `SPOT` are supported.
 * `zone_id` - (Required) (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of a form like `"us-west-2a"`. The provided availability zone must be in the same region as the Databricks deployment. For example, `"us-west-2a"` is not a valid zone ID if the Databricks deployment resides in the `"us-east-1"` region. This is an optional field. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the [List Zones API](https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterservicelistavailablezones).
+
+## azure_attributes Configuration Block
+
+`azure_attributes` optional configuration block contains attributes related to [instance pools on Azure](https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/instance-pools#--instancepoolazureattributes).
+
+-> **Note** *(Azure only)* Please specify empty configuration block (`azure_attributes {}`), even if you're not setting any custom values. This will prevent any resource update issues.
+
+The following options are [available](https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/instance-pools#--instancepoolazureattributes):
+
+* `availability` - (Optional) Availability type used for all instances in the pool. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`.
+* `spot_bid_max_price` - (Optional) The max price for Azure spot instances. Use `-1` to specify the lowest price.
+
 ### disk_spec Configuration Block
diff --git a/docs/resources/job.md b/docs/resources/job.md
index 1131f7d5ad..84b69ad01a 100644
--- a/docs/resources/job.md
+++ b/docs/resources/job.md
@@ -69,6 +69,7 @@ The following arguments are required:
 
 * `quartz_cron_expression` - (Required) A [Cron expression using Quartz syntax](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) that describes the schedule for a job. This field is required.
 * `timezone_id` - (Required) A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
+* `pause_status` - (Optional) Indicates whether this schedule is paused or not. Either “PAUSED” or “UNPAUSED”. When the `pause_status` field is omitted and a schedule is provided, the server defaults to “UNPAUSED”.
### spark_jar_task Configuration Block diff --git a/go.mod b/go.mod index 854e4fd6f1..5060c54a91 100644 --- a/go.mod +++ b/go.mod @@ -7,18 +7,18 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.13 github.com/Azure/go-autorest/autorest/azure/auth v0.5.7 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 - github.com/aws/aws-sdk-go v1.37.20 - github.com/google/go-querystring v1.0.0 + github.com/aws/aws-sdk-go v1.38.10 + github.com/google/go-querystring v1.1.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-retryablehttp v0.6.8 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.9.0 + github.com/hashicorp/hcl/v2 v2.9.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/errors v0.9.1 github.com/smartystreets/goconvey v1.6.4 // indirect github.com/stretchr/testify v1.7.0 - github.com/zclconf/go-cty v1.8.0 + github.com/zclconf/go-cty v1.8.1 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/tools v0.1.0 // indirect gopkg.in/ini.v1 v1.62.0 diff --git a/go.sum b/go.sum index 2988bd2f72..10293f4cc8 100644 --- a/go.sum +++ b/go.sum @@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -35,13 +34,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.11 h1:L4/pmq7poLdsy41Bj1FayKvBhayuWRYkx9HU5i4Ybl0= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -53,7 +49,6 @@ github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8K github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1 
h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= @@ -77,7 +72,6 @@ github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFU github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= @@ -86,8 +80,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.37.20 h1:CJCXpMYmBJrRH8YwoSE0oB9S3J5ax+62F14sYlDCztg= -github.com/aws/aws-sdk-go v1.37.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.10 h1:7lQrjAlyYrTGW2+9vnBv5HPSSuv+xDMmgU1YUnNSOOo= +github.com/aws/aws-sdk-go v1.38.10/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -102,7 +96,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -167,12 +160,11 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 
h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= @@ -203,14 +195,12 @@ github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUC github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= @@ -228,10 +218,9 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl/v2 v2.9.0 h1:7kJiMiKBqGHASbDJuFAMlpRMJLyhuLg/IsU/3EzwniA= -github.com/hashicorp/hcl/v2 v2.9.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/hcl/v2 v2.9.1 h1:eOy4gREY0/ZQHNItlfuEZqtcQbXIxzojlP301hDpnac= +github.com/hashicorp/hcl/v2 v2.9.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= @@ -271,7 +260,6 @@ github.com/kisielk/gotool 
v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -332,7 +320,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -351,10 +338,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.8.1 h1:SI0LqNeNxAgv2WWqWJMlG2/Ad/6aYJ7IVYYMigmfkuI= +github.com/zclconf/go-cty v1.8.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -369,9 +356,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= 
@@ -433,7 +418,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= @@ -483,7 +467,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -493,7 +476,6 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -539,13 +521,11 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -612,7 +592,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -629,7 +608,6 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/identity/resource_group.go b/identity/resource_group.go index 9da9bdc046..f486f8fe8f 100644 --- a/identity/resource_group.go +++ b/identity/resource_group.go @@ -71,9 +71,10 @@ func ResourceGroup() *schema.Resource { // Changed to true if allowClusterCreate { entitlementsAddList = append(entitlementsAddList, string(AllowClusterCreateEntitlement)) + } else { + // Changed to false + entitlementsRemoveList = append(entitlementsRemoveList, string(AllowClusterCreateEntitlement)) } - // Changed to false - entitlementsRemoveList = append(entitlementsRemoveList, string(AllowClusterCreateEntitlement)) } // If allow_sql_analytics_access has changed if d.HasChange("allow_sql_analytics_access") { @@ -81,19 +82,21 @@ func ResourceGroup() *schema.Resource { // Changed to true if allowSQLAnalyticsAccess { entitlementsAddList = append(entitlementsAddList, string(AllowSQLAnalyticsAccessEntitlement)) + } else { + // Changed to false + entitlementsRemoveList = append(entitlementsRemoveList, string(AllowSQLAnalyticsAccessEntitlement)) } - // Changed to false - entitlementsRemoveList = append(entitlementsRemoveList, string(AllowSQLAnalyticsAccessEntitlement)) } // If allow_instance_pool_create has changed if d.HasChange("allow_instance_pool_create") { allowClusterCreate := d.Get("allow_instance_pool_create").(bool) // Changed to true if allowClusterCreate { - entitlementsAddList = append(entitlementsAddList, string(AllowClusterCreateEntitlement)) + entitlementsAddList = append(entitlementsAddList, string(AllowInstancePoolCreateEntitlement)) + } else { + // Changed to false + entitlementsRemoveList = append(entitlementsRemoveList, string(AllowInstancePoolCreateEntitlement)) } - // Changed to false - entitlementsRemoveList = append(entitlementsRemoveList, 
string(AllowClusterCreateEntitlement)) } // TODO: not currently possible to update group display name if entitlementsAddList != nil || entitlementsRemoveList != nil { @@ -161,7 +164,7 @@ func isGroupSQLAnalyticsAccessEntitled(group *ScimGroup) bool { func isGroupInstancePoolCreateEntitled(group *ScimGroup) bool { for _, entitlement := range group.Entitlements { - if entitlement.Value == AllowClusterCreateEntitlement { + if entitlement.Value == AllowInstancePoolCreateEntitlement { return true } } diff --git a/identity/resource_group_test.go b/identity/resource_group_test.go index ea39189184..b01a6916b4 100644 --- a/identity/resource_group_test.go +++ b/identity/resource_group_test.go @@ -3,6 +3,8 @@ package identity import ( "testing" + "github.com/stretchr/testify/require" + "github.com/databrickslabs/terraform-provider-databricks/common" "github.com/databrickslabs/terraform-provider-databricks/qa" @@ -18,6 +20,17 @@ func TestResourceGroupCreate(t *testing.T) { ExpectedRequest: ScimGroup{ Schemas: []URN{"urn:ietf:params:scim:schemas:core:2.0:Group"}, DisplayName: "Data Scientists", + Entitlements: []entitlementsListItem{ + { + AllowClusterCreateEntitlement, + }, + { + AllowSQLAnalyticsAccessEntitlement, + }, + { + AllowInstancePoolCreateEntitlement, + }, + }, }, Response: ScimGroup{ ID: "abc", @@ -30,17 +43,35 @@ func TestResourceGroupCreate(t *testing.T) { Schemas: []URN{"urn:ietf:params:scim:schemas:core:2.0:Group"}, DisplayName: "Data Scientists", ID: "abc", + Entitlements: []entitlementsListItem{ + { + AllowClusterCreateEntitlement, + }, + { + AllowSQLAnalyticsAccessEntitlement, + }, + { + AllowInstancePoolCreateEntitlement, + }, + }, }, }, }, Resource: ResourceGroup(), - State: map[string]interface{}{ - "display_name": "Data Scientists", - }, + HCL: ` + display_name = "Data Scientists" + allow_instance_pool_create = true + allow_cluster_create = true + allow_sql_analytics_access = true + `, Create: true, }.Apply(t) assert.NoError(t, err, err) assert.Equal(t, "abc", d.Id()) + assert.Equal(t, "Data Scientists", d.Get("display_name")) + assert.Equal(t, true, d.Get("allow_cluster_create")) + assert.Equal(t, true, d.Get("allow_instance_pool_create")) + assert.Equal(t, true, d.Get("allow_sql_analytics_access")) } func TestResourceGroupCreate_Error(t *testing.T) { @@ -67,6 +98,42 @@ func TestResourceGroupCreate_Error(t *testing.T) { } func TestResourceGroupRead(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/Groups/abc", + Response: ScimGroup{ + Schemas: []URN{"urn:ietf:params:scim:schemas:core:2.0:Group"}, + DisplayName: "Data Scientists", + ID: "abc", + Entitlements: []entitlementsListItem{ + { + AllowSQLAnalyticsAccessEntitlement, + }, + { + AllowClusterCreateEntitlement, + }, + { + AllowInstancePoolCreateEntitlement, + }, + }, + }, + }, + }, + Resource: ResourceGroup(), + Read: true, + ID: "abc", + }.Apply(t) + assert.NoError(t, err, err) + assert.Equal(t, "abc", d.Id(), "Id should not be empty") + assert.Equal(t, true, d.Get("allow_cluster_create")) + assert.Equal(t, true, d.Get("allow_instance_pool_create")) + assert.Equal(t, true, d.Get("allow_sql_analytics_access")) + assert.Equal(t, "Data Scientists", d.Get("display_name")) +} + +func TestResourceGroupRead_NoEntitlements(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -87,6 +154,7 @@ func TestResourceGroupRead(t *testing.T) { assert.Equal(t, "abc", d.Id(), "Id should not be empty") assert.Equal(t, false, 
d.Get("allow_cluster_create")) assert.Equal(t, false, d.Get("allow_instance_pool_create")) + assert.Equal(t, false, d.Get("allow_sql_analytics_access")) assert.Equal(t, "Data Scientists", d.Get("display_name")) } @@ -131,13 +199,13 @@ func TestResourceGroupRead_Error(t *testing.T) { assert.Equal(t, "abc", d.Id(), "Id should not be empty for error reads") } -func TestResourceGroupUpdate(t *testing.T) { +func TestResourceGroupUpdate_AddPerms(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { Method: "PATCH", Resource: "/api/2.0/preview/scim/v2/Groups/abc", - Response: GroupPatchRequest{ + ExpectedRequest: GroupPatchRequest{ Schemas: []URN{"urn:ietf:params:scim:api:messages:2.0:PatchOp"}, Operations: []GroupPatchOperations{ { @@ -147,12 +215,14 @@ func TestResourceGroupUpdate(t *testing.T) { { Value: "allow-cluster-create", }, + { + Value: "sql-analytics-access", + }, + { + Value: "allow-instance-pool-create", + }, }, }, - { - Op: "remove", - Path: "entitlements[value eq \"allow-cluster-create\"]", - }, }, }, }, @@ -163,19 +233,101 @@ func TestResourceGroupUpdate(t *testing.T) { Schemas: []URN{"urn:ietf:params:scim:schemas:core:2.0:Group"}, DisplayName: "Data Ninjas", ID: "abc", + Entitlements: []entitlementsListItem{ + { + AllowSQLAnalyticsAccessEntitlement, + }, + { + AllowClusterCreateEntitlement, + }, + { + AllowInstancePoolCreateEntitlement, + }, + }, }, }, }, Resource: ResourceGroup(), - State: map[string]interface{}{ + InstanceState: map[string]string{ "display_name": "Data Ninjas", - "allow_instance_pool_create": true, + "allow_instance_pool_create": "false", + "allow_cluster_create": "false", + "allow_sql_analytics_access": "false", }, + HCL: ` + display_name = "Data Ninjas" + allow_instance_pool_create = true + allow_cluster_create = true + allow_sql_analytics_access = true + `, Update: true, ID: "abc", }.Apply(t) assert.NoError(t, err, err) assert.Equal(t, "abc", d.Id(), "Id should be the same as in reading") + assert.Equal(t, "Data Ninjas", d.Get("display_name")) + assert.Equal(t, true, d.Get("allow_cluster_create")) + assert.Equal(t, true, d.Get("allow_instance_pool_create")) + assert.Equal(t, true, d.Get("allow_sql_analytics_access")) +} + +func TestResourceGroupUpdate_RemovePerms(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.0/preview/scim/v2/Groups/abc", + ExpectedRequest: GroupPatchRequest{ + Schemas: []URN{"urn:ietf:params:scim:api:messages:2.0:PatchOp"}, + Operations: []GroupPatchOperations{ + { + Op: "remove", + Path: "entitlements[value eq \"allow-cluster-create\"]", + }, + { + Op: "remove", + Path: "entitlements[value eq \"sql-analytics-access\"]", + }, + { + Op: "remove", + Path: "entitlements[value eq \"allow-instance-pool-create\"]", + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/Groups/abc", + Response: ScimGroup{ + Schemas: []URN{"urn:ietf:params:scim:schemas:core:2.0:Group"}, + DisplayName: "Data Ninjas", + ID: "abc", + Entitlements: []entitlementsListItem{}, + }, + }, + }, + Resource: ResourceGroup(), + Update: true, + ID: "abc", + InstanceState: map[string]string{ + "display_name": "Data Ninjas", + "allow_instance_pool_create": "true", + "allow_cluster_create": "true", + "allow_sql_analytics_access": "true", + }, + HCL: ` + display_name = "Data Ninjas" + allow_instance_pool_create = false + allow_cluster_create = false + allow_sql_analytics_access = false + `, + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "abc", 
d.Id(), "Id should not be empty") + assert.Equal(t, "Data Ninjas", d.Get("display_name")) + assert.Equal(t, false, d.Get("allow_cluster_create")) + assert.Equal(t, false, d.Get("allow_instance_pool_create")) + assert.Equal(t, false, d.Get("allow_sql_analytics_access")) } func TestResourceGroupUpdate_Error(t *testing.T) { diff --git a/provider/provider.go b/provider/provider.go index ebab6ccf47..1d100d2890 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -47,6 +47,7 @@ func DatabricksProvider() *schema.Provider { "databricks_cluster_policy": compute.ResourceClusterPolicy(), "databricks_instance_pool": compute.ResourceInstancePool(), "databricks_job": compute.ResourceJob(), + "databricks_pipeline": compute.ResourcePipeline(), "databricks_group": identity.ResourceGroup(), "databricks_group_instance_profile": identity.ResourceGroupInstanceProfile(), diff --git a/scripts/preview-integration/main.tf b/scripts/preview-integration/main.tf new file mode 100644 index 0000000000..97b5bea268 --- /dev/null +++ b/scripts/preview-integration/main.tf @@ -0,0 +1,3 @@ +output "preview" { + value = "true" +} \ No newline at end of file diff --git a/scripts/preview-integration/require_env b/scripts/preview-integration/require_env new file mode 100644 index 0000000000..9e70bc452c --- /dev/null +++ b/scripts/preview-integration/require_env @@ -0,0 +1,3 @@ +DATABRICKS_HOST +DATABRICKS_TOKEN +CLOUD_ENV \ No newline at end of file diff --git a/sqlanalytics/acceptance/sql_endpoint_test.go b/sqlanalytics/acceptance/sql_endpoint_test.go index 8e0a49a28a..4be8fce187 100644 --- a/sqlanalytics/acceptance/sql_endpoint_test.go +++ b/sqlanalytics/acceptance/sql_endpoint_test.go @@ -6,7 +6,7 @@ import ( "github.com/databrickslabs/terraform-provider-databricks/internal/acceptance" ) -func TestAccSQLEndpoint(t *testing.T) { +func TestPreviewAccSQLEndpoint(t *testing.T) { acceptance.Test(t, []acceptance.Step{ { Template: `resource "databricks_sql_endpoint" "this" { diff --git a/sqlanalytics/resource_sql_endpoint_test.go b/sqlanalytics/resource_sql_endpoint_test.go index dc89ce61ea..88dffb8a8b 100644 --- a/sqlanalytics/resource_sql_endpoint_test.go +++ b/sqlanalytics/resource_sql_endpoint_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccSQLEndpoints(t *testing.T) { +func TestPreviewAccSQLEndpoints(t *testing.T) { if _, ok := os.LookupEnv("CLOUD_ENV"); !ok { t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") } @@ -106,6 +106,33 @@ func TestResourceSQLEndpointCreate(t *testing.T) { assert.Equal(t, "d7c9d05c-7496-4c69-b089-48823edad40c", d.Get("data_source_id")) } +func TestResourceSQLEndpointCreate_ErrorDisabled(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/sql/endpoints", + ExpectedRequest: SQLEndpoint{ + Name: "foo", + ClusterSize: "Small", + MaxNumClusters: 1, + }, + Status: 404, + Response: common.APIError{ + ErrorCode: "FEATURE_DISABLED", + Message: "SQL Analytics is not supported", + }, + }, + }, + Resource: ResourceSQLEndpoint(), + Create: true, + HCL: ` + name = "foo" + cluster_size = "Small" + `, + }.ExpectError(t, "SQL Analytics is not supported") +} + func TestResourceSQLEndpointRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{