From 7a3ee43b407bf376d99dd600b6ff9a9a786c5ca2 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Thu, 3 Dec 2020 22:51:59 +0100 Subject: [PATCH] Added databricks_service_principal resource. (#386) Directly creates service principal, that could be added to a group within workspace. ```hcl resource "databricks_service_principal" "sp" { application_id = "00000000-0000-0000-0000-000000000000" } ``` Co-authored-by: tcz001 Co-authored-by: Angel Villalain Garcia Co-authored-by: Serge Smertin --- CHANGELOG.md | 2 + docs/resources/permissions.md | 17 +- docs/resources/service_principal.md | 64 ++++ .../resource_service_principal_test.go | 49 +++ identity/resource_service_principal.go | 174 +++++++++ identity/resource_service_principal_test.go | 346 ++++++++++++++++++ identity/scim.go | 30 +- provider/provider.go | 1 + 8 files changed, 660 insertions(+), 23 deletions(-) create mode 100644 docs/resources/service_principal.md create mode 100644 identity/acceptance/resource_service_principal_test.go create mode 100644 identity/resource_service_principal.go create mode 100644 identity/resource_service_principal_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 7be3200a3c..3acecae378 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## 0.3.0 +* Added [databricks_service_principal](https://github.com/databrickslabs/terraform-provider-databricks/pull/386) resource. + **Behavior changes** * Removed deprecated `library_jar`, `library_egg`, `library_whl`, `library_pypi`, `library_cran`, and `library_maven` from `databricks_cluster` and `databricks_job` in favor of more API-transparent [library](https://registry.terraform.io/providers/databrickslabs/databricks/latest/docs/resources/cluster#library-configuration-block) configuration block. 
diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index 7d04dec341..c1e71d149d 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -92,7 +92,7 @@ resource "databricks_permissions" "policy_usage" { ## Instance Pool usage -[Instance Pools](instance_pool.md) access control [allows to](https://docs.databricks.com/security/access-control/pool-acl.html) assign `CAN_ATTACH_TO` and `CAN_MANAGE` permissions to users and groups. It's also possible to grant creation of Instance Pools to individual [groups](group.md#allow_instance_pool_create) and [users](user.md#allow_instance_pool_create). +[Instance Pools](instance_pool.md) access control [allows to](https://docs.databricks.com/security/access-control/pool-acl.html) assign `CAN_ATTACH_TO` and `CAN_MANAGE` permissions to users, service principals, and groups. It's also possible to grant creation of Instance Pools to individual [groups](group.md#allow_instance_pool_create) and [users](user.md#allow_instance_pool_create), [service principals](service_principal.md#allow_instance_pool_create). ```hcl resource "databricks_group" "auto" { @@ -128,12 +128,12 @@ resource "databricks_permissions" "pool_usage" { ## Job usage -There are four assignable [permission levels](https://docs.databricks.com/security/access-control/jobs-acl.html#job-permissions) for [databricks_job](job.md): `CAN_VIEW`, `CAN_MANAGE_RUN`, `IS_OWNER`, and `CAN_MANAGE`. Admins are granted the `CAN_MANAGE` permission by default, and they can assign that permission to non-admin users. +There are four assignable [permission levels](https://docs.databricks.com/security/access-control/jobs-acl.html#job-permissions) for [databricks_job](job.md): `CAN_VIEW`, `CAN_MANAGE_RUN`, `IS_OWNER`, and `CAN_MANAGE`. Admins are granted the `CAN_MANAGE` permission by default, and they can assign that permission to non-admin users, and service principals. * The creator of a job has `IS_OWNER` permission. 
Destroying `databricks_permissions` resource for a job would revert ownership to the creator. * A job must have exactly one owner. If resource is changed and no owner is specified, currently authenticated principal would become new owner of the job. Nothing would change, per se, if the job was created through Terraform. * A job cannot have a group as an owner. -* Jobs triggered through *Run Now* assume the permissions of the job owner and not the user who issued Run Now. +* Jobs triggered through *Run Now* assume the permissions of the job owner and not the user, and service principal who issued Run Now. * Read [main documentation](https://docs.databricks.com/security/access-control/jobs-acl.html) for additional detail. ```hcl @@ -225,12 +225,12 @@ resource "databricks_permissions" "notebook_usage" { ## Folder usage -Valid [permission levels](https://docs.databricks.com/security/access-control/workspace-acl.html#folder-permissions) for folders of [databricks_notebook](notebook.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`. Notebooks and experiments in a folder inherit all permissions settings of that folder. For example, a user that has `CAN_RUN` permission on a folder has `CAN_RUN` permission on the notebooks in that folder. +Valid [permission levels](https://docs.databricks.com/security/access-control/workspace-acl.html#folder-permissions) for folders of [databricks_notebook](notebook.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`. Notebooks and experiments in a folder inherit all permissions settings of that folder. For example, a user (or service principal) that has `CAN_RUN` permission on a folder has `CAN_RUN` permission on the notebooks in that folder. * All users can list items in the folder without any permissions. -* All users have `CAN_MANAGE` permission for items in the Workspace > Shared Icon Shared folder. You can grant `CAN_MANAGE` permission to notebooks and folders by moving them to the Shared Icon Shared folder. 
-* All users have `CAN_MANAGE` permission for objects the user creates. -* User home directory - The user has `CAN_MANAGE` permission. All other users can list their directories. +* All users (or service principals) have `CAN_MANAGE` permission for items in the Workspace > Shared Icon Shared folder. You can grant `CAN_MANAGE` permission to notebooks and folders by moving them to the Shared Icon Shared folder. +* All users (or service principals) have `CAN_MANAGE` permission for objects the user creates. +* User home directory - The user (or service principal) has `CAN_MANAGE` permission. All other users (or service principals) can list their directories. ```hcl resource "databricks_group" "auto" { @@ -274,7 +274,6 @@ resource "databricks_permissions" "folder_usage" { ## Passwords usage By default on AWS deployments, all admin users can sign in to Databricks using either SSO or their username and password, and all API users can authenticate to the Databricks REST APIs using their username and password. As an admin, you [can limit](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#optional-configure-password-access-control) admin users’ and API users’ ability to authenticate with their username and password by configuring `CAN_USE` permissions using password access control. -ign-On. ```hcl resource "databricks_group" "guests" { @@ -325,7 +324,7 @@ resource "databricks_permissions" "token_usage" { ## Secrets -One can control access to [databricks_secret](secret.md) through `initial_manage_principal` argument on [databricks_secret_scope](secret_scope.md) or [databricks_secret_acl](secret_acl.md), so that users can `READ`, `WRITE` or `MANAGE` entries within secret scope. 
+One can control access to [databricks_secret](secret.md) through `initial_manage_principal` argument on [databricks_secret_scope](secret_scope.md) or [databricks_secret_acl](secret_acl.md), so that users (or service principals) can `READ`, `WRITE` or `MANAGE` entries within secret scope. ## Tables, Views and Databases diff --git a/docs/resources/service_principal.md b/docs/resources/service_principal.md new file mode 100644 index 0000000000..1ca4b81c23 --- /dev/null +++ b/docs/resources/service_principal.md @@ -0,0 +1,64 @@ +# databricks_service_principal Resource + +Directly creates service principal, that could be added to [databricks_group](group.md) within workspace. + +## Example Usage + +Creating regular service principal: + +```hcl +resource "databricks_service_principal" "sp" { + application_id = "00000000-0000-0000-0000-000000000000" +} +``` + +Creating service principal with administrative permissions - referencing special `admins` [databricks_group](../data-sources/group.md) in [databricks_group_member](group_member.md) resource: + +```hcl +data "databricks_group" "admins" { + display_name = "admins" +} + +resource "databricks_service_principal" "sp" { + application_id = "00000000-0000-0000-0000-000000000000" +} + +resource "databricks_group_member" "i-am-admin" { + group_id = data.databricks_group.admins.id + member_id = databricks_service_principal.sp.id +} +``` + +Creating service principal with cluster create permissions: + +```hcl +resource "databricks_service_principal" "sp" { + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Example service principal" + allow_cluster_create = true +} +``` + +## Argument Reference + +The following arguments are available: + +* `application_id` - (Required) This is the application id of the given service principal and will be their form of access and identity. +* `display_name` - (Optional) This is an alias for the service principal can be the full name of the service principal. 
+* `allow_cluster_create` - (Optional) Allow the service principal to have [cluster](cluster.md) create privileges. Defaults to false. More fine-grained permissions could be assigned with [databricks_permissions](permissions.md#Cluster-usage) and `cluster_id` argument. Everyone without `allow_cluster_create` argument set, but with [permission to use](permissions.md#Cluster-Policy-usage) Cluster Policy would be able to create clusters, but within boundaries of that specific policy. +* `allow_instance_pool_create` - (Optional) Allow the service principal to have [instance pool](instance_pool.md) create privileges. Defaults to false. More fine-grained permissions could be assigned with [databricks_permissions](permissions.md#Instance-Pool-usage) and [instance_pool_id](permissions.md#instance_pool_id) argument. +* `active` - (Optional) Whether the service principal is active or not. True by default, but can be set to false in case of service principal deactivation with preserving service principal assets. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Canonical unique identifier for the service principal. 
+ +## Import + +The resource scim service principal can be imported using its id: + +```bash +$ terraform import databricks_service_principal.me <service-principal-id> +``` diff --git a/identity/acceptance/resource_service_principal_test.go new file mode 100644 index 0000000000..48b3ef2765 --- /dev/null +++ b/identity/acceptance/resource_service_principal_test.go @@ -0,0 +1,49 @@ +package acceptance + +import ( + "os" + "testing" + + "github.com/databrickslabs/databricks-terraform/internal/acceptance" + "github.com/databrickslabs/databricks-terraform/internal/qa" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServicePrincipalResource(t *testing.T) { + if _, ok := os.LookupEnv("CLOUD_ENV"); !ok { + t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") + } + config := qa.EnvironmentTemplate(t, ` + data "databricks_group" "admins" { + display_name = "admins" + } + resource "databricks_service_principal" "sp_first" { + application_id = "00000000-1234-5678-0000-000000000001" + display_name = "Eerste {var.RANDOM}" + } + resource "databricks_service_principal" "sp_second" { + application_id = "00000000-1234-5678-0000-000000000002" + display_name = "Tweede {var.RANDOM}" + allow_cluster_create = true + } + resource "databricks_service_principal" "sp_third" { + application_id = "00000000-1234-5678-0000-000000000003" + allow_instance_pool_create = true + }`) + acceptance.AccTest(t, resource.TestCase{ + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_service_principal.sp_first", "allow_cluster_create", "false"), + resource.TestCheckResourceAttr("databricks_service_principal.sp_first", "allow_instance_pool_create", "false"), + resource.TestCheckResourceAttr("databricks_service_principal.sp_second", "allow_cluster_create", "true"), + resource.TestCheckResourceAttr("databricks_service_principal.sp_second", 
"allow_instance_pool_create", "false"), + resource.TestCheckResourceAttr("databricks_service_principal.sp_third", "allow_cluster_create", "false"), + resource.TestCheckResourceAttr("databricks_service_principal.sp_third", "allow_instance_pool_create", "true"), + ), + Destroy: false, + }, + }, + }) +} diff --git a/identity/resource_service_principal.go b/identity/resource_service_principal.go new file mode 100644 index 0000000000..f0177be489 --- /dev/null +++ b/identity/resource_service_principal.go @@ -0,0 +1,174 @@ +package identity + +import ( + "context" + "fmt" + "log" + "net/http" + + "github.com/databrickslabs/databricks-terraform/common" + "github.com/databrickslabs/databricks-terraform/internal" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// NewServicePrincipalsAPI creates ServicePrincipalsAPI instance from provider meta +func NewServicePrincipalsAPI(m interface{}) ServicePrincipalsAPI { + return ServicePrincipalsAPI{client: m.(*common.DatabricksClient)} +} + +// ServicePrincipalsAPI exposes the scim servicePrincipal API +type ServicePrincipalsAPI struct { + client *common.DatabricksClient +} + +// ServicePrincipalEntity entity from which resource schema is made +type ServicePrincipalEntity struct { + ApplicationID string `json:"application_id"` + DisplayName string `json:"display_name,omitempty" tf:"computed"` + Active bool `json:"active,omitempty"` + AllowClusterCreate bool `json:"allow_cluster_create,omitempty"` + AllowInstancePoolCreate bool `json:"allow_instance_pool_create,omitempty"` +} + +func (sp ServicePrincipalEntity) toRequest() ScimUser { + entitlements := []entitlementsListItem{} + if sp.AllowClusterCreate { + entitlements = append(entitlements, entitlementsListItem{ + Value: Entitlement("allow-cluster-create"), + }) + } + if sp.AllowInstancePoolCreate { + entitlements = append(entitlements, entitlementsListItem{ + Value: Entitlement("allow-instance-pool-create"), + 
}) + } + return ScimUser{ + Schemas: []URN{ServicePrincipalSchema}, + ApplicationID: sp.ApplicationID, + Active: sp.Active, + DisplayName: sp.DisplayName, + Entitlements: entitlements, + } +} + +// CreateR .. +func (a ServicePrincipalsAPI) CreateR(rsp ServicePrincipalEntity) (sp ScimUser, err error) { + err = a.client.Scim(http.MethodPost, "/preview/scim/v2/ServicePrincipals", rsp.toRequest(), &sp) + return sp, err +} + +// ReadR reads resource-friendly entity +func (a ServicePrincipalsAPI) ReadR(servicePrincipalID string) (rsp ServicePrincipalEntity, err error) { + servicePrincipal, err := a.read(servicePrincipalID) + if err != nil { + return + } + rsp.ApplicationID = servicePrincipal.ApplicationID + rsp.DisplayName = servicePrincipal.DisplayName + rsp.Active = servicePrincipal.Active + for _, ent := range servicePrincipal.Entitlements { + switch ent.Value { + case AllowClusterCreateEntitlement: + rsp.AllowClusterCreate = true + case AllowInstancePoolCreateEntitlement: + rsp.AllowInstancePoolCreate = true + } + } + return +} + +func (a ServicePrincipalsAPI) read(servicePrincipalID string) (sp ScimUser, err error) { + servicePrincipalPath := fmt.Sprintf("/preview/scim/v2/ServicePrincipals/%v", servicePrincipalID) + err = a.client.Scim(http.MethodGet, servicePrincipalPath, nil, &sp) + return +} + +// UpdateR replaces resource-friendly-entity +func (a ServicePrincipalsAPI) UpdateR(servicePrincipalID string, rsp ServicePrincipalEntity) error { + servicePrincipal, err := a.read(servicePrincipalID) + if err != nil { + return err + } + updateRequest := rsp.toRequest() + updateRequest.Groups = servicePrincipal.Groups + return a.client.Scim(http.MethodPut, + fmt.Sprintf("/preview/scim/v2/ServicePrincipals/%v", servicePrincipalID), + updateRequest, nil) +} + +// PatchR updates resource-friendly entity +func (a ServicePrincipalsAPI) PatchR(servicePrincipalID string, r patchRequest) error { + return a.client.Scim(http.MethodPatch, 
fmt.Sprintf("/preview/scim/v2/ServicePrincipals/%v", servicePrincipalID), r, nil) +} + +// Delete will delete the servicePrincipal given the servicePrincipal id +func (a ServicePrincipalsAPI) Delete(servicePrincipalID string) error { + servicePrincipalPath := fmt.Sprintf("/preview/scim/v2/ServicePrincipals/%v", servicePrincipalID) + return a.client.Scim(http.MethodDelete, servicePrincipalPath, nil, nil) +} + +// ResourceServicePrincipal manages service principals within workspace +func ResourceServicePrincipal() *schema.Resource { + servicePrincipalSchema := internal.StructToSchema(ServicePrincipalEntity{}, func( + s map[string]*schema.Schema) map[string]*schema.Schema { + s["application_id"].ForceNew = true + s["active"].Default = true + return s + }) + readContext := func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + servicePrincipal, err := NewServicePrincipalsAPI(m).ReadR(d.Id()) + if e, ok := err.(common.APIError); ok && e.IsMissing() { + log.Printf("missing resource due to error: %v\n", e) + d.SetId("") + return nil + } + if err != nil { + return diag.FromErr(err) + } + err = internal.StructToData(servicePrincipal, servicePrincipalSchema, d) + if err != nil { + return diag.FromErr(err) + } + return nil + } + return &schema.Resource{ + Schema: servicePrincipalSchema, + ReadContext: readContext, + CreateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var ru ServicePrincipalEntity + err := internal.DataToStructPointer(d, servicePrincipalSchema, &ru) + if err != nil { + return diag.FromErr(err) + } + servicePrincipal, err := NewServicePrincipalsAPI(m).CreateR(ru) + if err != nil { + return diag.FromErr(err) + } + d.SetId(servicePrincipal.ID) + return readContext(ctx, d, m) + }, + UpdateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var ru ServicePrincipalEntity + err := internal.DataToStructPointer(d, servicePrincipalSchema, &ru) + if 
err != nil { + return diag.FromErr(err) + } + err = NewServicePrincipalsAPI(m).UpdateR(d.Id(), ru) + if err != nil { + return diag.FromErr(err) + } + return readContext(ctx, d, m) + }, + DeleteContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + err := NewServicePrincipalsAPI(m).Delete(d.Id()) + if err != nil { + return diag.FromErr(err) + } + return nil + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + } +} diff --git a/identity/resource_service_principal_test.go b/identity/resource_service_principal_test.go new file mode 100644 index 0000000000..497d5a9b18 --- /dev/null +++ b/identity/resource_service_principal_test.go @@ -0,0 +1,346 @@ +package identity + +import ( + "testing" + + "github.com/databrickslabs/databricks-terraform/common" + "github.com/databrickslabs/databricks-terraform/internal/qa" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResourceServicePrincipalRead(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Response: ScimUser{ + ID: "abc", + DisplayName: "Example Service Principal", + ApplicationID: "00000000-0000-0000-0000-000000000000", + Groups: []groupsListItem{ + { + Display: "admins", + Value: "4567", + }, + { + Display: "ds", + Value: "9877", + }, + }, + }, + }, + }, + Resource: ResourceServicePrincipal(), + New: true, + Read: true, + ID: "abc", + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "abc", d.Id(), "Id should not be empty") + assert.Equal(t, "00000000-0000-0000-0000-000000000000", d.Get("application_id")) + assert.Equal(t, "Example Service Principal", d.Get("display_name")) + assert.Equal(t, false, d.Get("allow_cluster_create")) +} + +func TestResourceServicePrincipalRead_NotFound(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + 
Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Status: 404, + }, + }, + Resource: ResourceServicePrincipal(), + New: true, + Read: true, + ID: "abc", + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "", d.Id()) +} + +func TestResourceServicePrincipalRead_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Status: 400, + Response: common.APIErrorBody{ + ScimDetail: "Something", + ScimStatus: "Else", + }, + }, + }, + Resource: ResourceServicePrincipal(), + New: true, + Read: true, + ID: "abc", + }.Apply(t) + require.Error(t, err) + assert.Equal(t, "abc", d.Id()) +} + +func TestResourceServicePrincipalCreate(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals", + ExpectedRequest: ScimUser{ + DisplayName: "Example Service Principal", + Active: true, + Entitlements: []entitlementsListItem{ + { + Value: "allow-cluster-create", + }, + }, + ApplicationID: "00000000-0000-0000-0000-000000000000", + Schemas: []URN{ServicePrincipalSchema}, + }, + Response: ScimUser{ + ID: "abc", + }, + }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Response: ScimUser{ + DisplayName: "Example Service Principal", + Active: true, + ApplicationID: "00000000-0000-0000-0000-000000000000", + ID: "abc", + Entitlements: []entitlementsListItem{ + { + Value: AllowClusterCreateEntitlement, + }, + }, + Groups: []groupsListItem{ + { + Display: "admins", + Value: "4567", + }, + { + Display: "ds", + Value: "9877", + }, + }, + }, + }, + }, + Resource: ResourceServicePrincipal(), + Create: true, + HCL: ` + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Example Service Principal" + allow_cluster_create = true + `, + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "abc", 
d.Id(), "Id should not be empty") + assert.Equal(t, "00000000-0000-0000-0000-000000000000", d.Get("application_id")) + assert.Equal(t, "Example Service Principal", d.Get("display_name")) + assert.Equal(t, true, d.Get("allow_cluster_create")) +} + +func TestResourceServicePrincipalCreate_Error(t *testing.T) { + _, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals", + Status: 400, + }, + }, + Resource: ResourceServicePrincipal(), + Create: true, + HCL: ` + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Example Service Principal" + allow_cluster_create = true + `, + }.Apply(t) + require.Error(t, err, err) +} + +func TestResourceServicePrincipalUpdate(t *testing.T) { + newServicePrincipal := ScimUser{ + Schemas: []URN{ServicePrincipalSchema}, + DisplayName: "Changed Name", + ApplicationID: "00000000-0000-0000-0000-000000000000", + Active: true, + Entitlements: []entitlementsListItem{ + { + Value: AllowInstancePoolCreateEntitlement, + }, + }, + Groups: []groupsListItem{ + { + Display: "admins", + Value: "4567", + }, + { + Display: "ds", + Value: "9877", + }, + }, + } + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Response: ScimUser{ + DisplayName: "Example Service Principal", + Active: true, + ApplicationID: "00000000-0000-0000-0000-000000000000", + ID: "abc", + Entitlements: []entitlementsListItem{ + { + Value: AllowClusterCreateEntitlement, + }, + }, + Groups: []groupsListItem{ + { + Display: "admins", + Value: "4567", + }, + { + Display: "ds", + Value: "9877", + }, + }, + }, + }, + { + Method: "PUT", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + ExpectedRequest: newServicePrincipal, + }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Response: newServicePrincipal, + }, + }, + Resource: 
ResourceServicePrincipal(), + Update: true, + ID: "abc", + HCL: ` + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Changed Name" + allow_cluster_create = false + allow_instance_pool_create = true + `, + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "abc", d.Id(), "Id should not be empty") + assert.Equal(t, "00000000-0000-0000-0000-000000000000", d.Get("application_id")) + assert.Equal(t, "Changed Name", d.Get("display_name")) + assert.Equal(t, false, d.Get("allow_cluster_create")) + assert.Equal(t, true, d.Get("allow_instance_pool_create")) +} + +func TestResourceServicePrincipalUpdate_Error(t *testing.T) { + _, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Status: 400, + }, + }, + Resource: ResourceServicePrincipal(), + Update: true, + ID: "abc", + HCL: ` + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Changed Name" + allow_cluster_create = false + allow_instance_pool_create = true + `, + }.Apply(t) + require.Error(t, err, err) +} + +func TestResourceServicePrincipalUpdate_ErrorPut(t *testing.T) { + _, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Response: ScimUser{ + DisplayName: "Example Service Principal", + Active: true, + ApplicationID: "00000000-0000-0000-0000-000000000000", + ID: "abc", + Entitlements: []entitlementsListItem{ + { + Value: AllowClusterCreateEntitlement, + }, + }, + Groups: []groupsListItem{ + { + Display: "admins", + Value: "4567", + }, + { + Display: "ds", + Value: "9877", + }, + }, + }, + }, + { + Method: "PUT", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Status: 400, + }, + }, + Resource: ResourceServicePrincipal(), + Update: true, + ID: "abc", + HCL: ` + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Changed Name" + 
allow_cluster_create = false + allow_instance_pool_create = true + `, + }.Apply(t) + require.Error(t, err, err) +} + +func TestResourceServicePrincipalDelete(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "DELETE", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + }, + }, + Resource: ResourceServicePrincipal(), + Delete: true, + ID: "abc", + }.Apply(t) + require.NoError(t, err, err) + assert.Equal(t, "abc", d.Id(), "Id should not be empty") +} + +func TestResourceServicePrincipalDelete_Error(t *testing.T) { + _, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "DELETE", + Resource: "/api/2.0/preview/scim/v2/ServicePrincipals/abc", + Status: 400, + }, + }, + Resource: ResourceServicePrincipal(), + Delete: true, + ID: "abc", + }.Apply(t) + require.Error(t, err, err) +} diff --git a/identity/scim.go b/identity/scim.go index db81ce68b5..92faedd38f 100644 --- a/identity/scim.go +++ b/identity/scim.go @@ -5,10 +5,11 @@ type URN string // Possible schema URNs for the Databricks SCIM api const ( - UserSchema URN = "urn:ietf:params:scim:schemas:core:2.0:User" - WorkspaceUserSchema URN = "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" - PatchOp URN = "urn:ietf:params:scim:api:messages:2.0:PatchOp" - GroupSchema URN = "urn:ietf:params:scim:schemas:core:2.0:Group" + UserSchema URN = "urn:ietf:params:scim:schemas:core:2.0:User" + ServicePrincipalSchema URN = "urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal" + WorkspaceUserSchema URN = "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" + PatchOp URN = "urn:ietf:params:scim:api:messages:2.0:PatchOp" + GroupSchema URN = "urn:ietf:params:scim:schemas:core:2.0:Group" ) // MembersValue is a list of value items for the members path @@ -149,16 +150,17 @@ type email struct { // ScimUser is a struct that contains all the information about a SCIM user type ScimUser struct { - ID string `json:"id,omitempty"` - Emails []email 
`json:"emails,omitempty"` - DisplayName string `json:"displayName,omitempty"` - Active bool `json:"active,omitempty"` - Schemas []URN `json:"schemas,omitempty"` - UserName string `json:"userName,omitempty"` - Groups []groupsListItem `json:"groups,omitempty"` - Name map[string]string `json:"name,omitempty"` - Roles []roleListItem `json:"roles,omitempty"` - Entitlements []entitlementsListItem `json:"entitlements,omitempty"` + ID string `json:"id,omitempty"` + Emails []email `json:"emails,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Active bool `json:"active,omitempty"` + Schemas []URN `json:"schemas,omitempty"` + UserName string `json:"userName,omitempty"` + ApplicationID string `json:"application_id,omitempty"` + Groups []groupsListItem `json:"groups,omitempty"` + Name map[string]string `json:"name,omitempty"` + Roles []roleListItem `json:"roles,omitempty"` + Entitlements []entitlementsListItem `json:"entitlements,omitempty"` } // HasRole returns true if group has a role diff --git a/provider/provider.go b/provider/provider.go index 15b7e9cc82..b7873234a9 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -50,6 +50,7 @@ func DatabricksProvider() *schema.Provider { "databricks_group_member": identity.ResourceGroupMember(), "databricks_token": identity.ResourceToken(), "databricks_user": identity.ResourceUser(), + "databricks_service_principal": identity.ResourceServicePrincipal(), "databricks_mws_customer_managed_keys": mws.ResourceCustomerManagedKey(), "databricks_mws_credentials": mws.ResourceCredentials(),