diff --git a/.codegen/api.go.tmpl b/.codegen/api.go.tmpl index 4c9e70e0c..900b6f70a 100644 --- a/.codegen/api.go.tmpl +++ b/.codegen/api.go.tmpl @@ -168,9 +168,9 @@ func (a *{{.Service.Name}}API) {{.PascalName}}All(ctx context.Context{{if .Reque var totalCount {{template "type" .Pagination.Limit.Entity}} = 0 {{ end -}} ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - {{if .Pagination.NeedsOffsetDedupe -}} + {{if .NeedsOffsetDedupe -}} // deduplicate items that may have been added during iteration - seen := map[{{template "type" .Pagination.Entity.IdentifierField.Entity}}]bool{} + seen := map[{{template "type" .IdentifierField.Entity}}]bool{} {{end}}{{if eq .Pagination.Increment 1 -}} request.{{.Pagination.Offset.PascalName}} = 1 // start iterating from the first page {{end}}for { @@ -182,8 +182,8 @@ func (a *{{.Service.Name}}API) {{.PascalName}}All(ctx context.Context{{if .Reque break } for _, v := range response.{{.Pagination.Results.PascalName}} { - {{- if .Pagination.NeedsOffsetDedupe -}} - id := v.{{.Pagination.Entity.IdentifierField.PascalName}} + {{- if .NeedsOffsetDedupe -}} + id := v{{ template "field-path" .IdFieldPath }} if seen[id] { // item was added during iteration continue @@ -228,9 +228,9 @@ func (a *{{.Service.Name}}API) {{.PascalName}}All(ctx context.Context{{if .Reque return response.{{.Pagination.Results.PascalName}}, nil{{else}}return a.impl.{{.PascalName}}(ctx, request){{end}} } {{end}}{{if .NamedIdMap}} -// {{.NamedIdMap.PascalName}} calls [{{.Service.Name}}API.{{.PascalName}}{{if not .NamedIdMap.Direct}}All{{end -}}] and creates a map of results with [{{.NamedIdMap.Entity.PascalName}}]{{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}} as key and [{{.NamedIdMap.Entity.PascalName}}].{{.NamedIdMap.Id.PascalName}} as value. 
+// {{.NamedIdMap.PascalName}} calls [{{.Service.Name}}API.{{.PascalName}}{{if not .NamedIdMap.Direct}}All{{end -}}] and creates a map of results with [{{.NamedIdMap.Entity.PascalName}}]{{ template "field-path" .NamedIdMap.NamePath }} as key and [{{.NamedIdMap.Entity.PascalName}}]{{ template "field-path" .NamedIdMap.IdPath}} as value. // -// Returns an error if there's more than one [{{.NamedIdMap.Entity.PascalName}}] with the same {{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}}. +// Returns an error if there's more than one [{{.NamedIdMap.Entity.PascalName}}] with the same {{ template "field-path" .NamedIdMap.NamePath }}. // // Note: All [{{.NamedIdMap.Entity.PascalName}}] instances are loaded into memory before creating a map. // @@ -243,19 +243,19 @@ func (a *{{.Service.Name}}API) {{.NamedIdMap.PascalName}}(ctx context.Context{{i return nil, err } for _, v := range result { - key := v{{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}} + key := v{{ template "field-path" .NamedIdMap.NamePath }} _, duplicate := mapping[key] if duplicate { - return nil, fmt.Errorf("duplicate {{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}}: %s", key) + return nil, fmt.Errorf("duplicate {{ template "field-path" .NamedIdMap.NamePath }}: %s", key) } - mapping[key] = v.{{.NamedIdMap.Id.PascalName}} + mapping[key] = v{{ template "field-path" .NamedIdMap.IdPath }} } return mapping, nil } {{end}}{{if .GetByName}} // GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}} calls [{{.Service.Name}}API.{{.NamedIdMap.Entity.PascalName}}{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}To{{.NamedIdMap.Id.PascalName}}Map] and returns a single [{{.GetByName.PascalName}}]. // -// Returns an error if there's more than one [{{.NamedIdMap.Entity.PascalName}}] with the same {{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}}. +// Returns an error if there's more than one [{{.NamedIdMap.Entity.PascalName}}] with the same {{ template "field-path" .NamedIdMap.NamePath }}. 
// // Note: All [{{.NamedIdMap.Entity.PascalName}}] instances are loaded into memory before returning matching by name. // @@ -268,7 +268,7 @@ func (a *{{.Service.Name}}API) GetBy{{range .NamedIdMap.NamePath}}{{.PascalName} } tmp := map[string][]{{.GetByName.PascalName}}{} for _, v := range result { - key := v{{range .NamedIdMap.NamePath}}.{{.PascalName}}{{end}} + key := v{{ template "field-path" .NamedIdMap.NamePath }} tmp[key] = append(tmp[key], v) } alternatives, ok := tmp[name] @@ -298,4 +298,7 @@ func (a *{{.Service.Name}}API) {{.Shortcut.PascalName}}AndWait(ctx context.Conte {{end}} {{end -}} -{{- end}} \ No newline at end of file +{{- end}} +{{- define "field-path" -}} + {{- range .}}.{{.PascalName}}{{end}} +{{- end -}} \ No newline at end of file diff --git a/account_client.go b/account_client.go index 1ac73ae1b..512a4f978 100755 --- a/account_client.go +++ b/account_client.go @@ -223,7 +223,16 @@ type AccountClient struct { // data by accident. ServicePrincipals *iam.AccountServicePrincipalsAPI - // TBD + // The Personal Compute enablement setting lets you control which users can + // use the Personal Compute default policy to create compute resources. By + // default all users in all workspaces have access (ON), but you can change + // the setting to instead let individual workspaces configure access control + // (DELEGATE). + // + // There is only one instance of this setting per account. Since this + // setting has a default value, this setting is present on all accounts even + // though it's never set on a given account. Deletion reverts the value of + // the setting back to the default value. Settings *settings.AccountSettingsAPI // These APIs manage storage configurations for this workspace. 
A root diff --git a/openapi/code/entity.go b/openapi/code/entity.go index 737144073..704225255 100644 --- a/openapi/code/entity.go +++ b/openapi/code/entity.go @@ -57,12 +57,6 @@ type Entity struct { // this field does not have a concrete type IsAny bool - // this field holds the identifier of the entity - IsIdentifier bool - - // this field holds a name of the entity - IsName bool - // this field is computed on the platform side IsComputed bool @@ -111,6 +105,26 @@ func (e *Entity) Field(name string) *Field { return &field } +// Given a list of field names, return the list of *Field objects which result +// from following the path of fields in the entity. +func (e *Entity) GetUnderlyingFields(path []string) ([]*Field, error) { + if len(path) == 0 { + return nil, fmt.Errorf("empty path is not allowed (entity: %s)", e.FullName()) + } + if len(path) == 1 { + return []*Field{e.Field(path[0])}, nil + } + field := e.Field(path[0]) + if field == nil { + return nil, fmt.Errorf("field %s not found in entity %s", path[0], e.FullName()) + } + rest, err := field.Entity.GetUnderlyingFields(path[1:]) + if err != nil { + return nil, err + } + return append([]*Field{field}, rest...), nil +} + // IsObject returns true if entity is not a Mpa and has more than zero fields func (e *Entity) IsObject() bool { return e.MapValue == nil && len(e.fields) > 0 @@ -182,42 +196,6 @@ func (e *Entity) HasJsonField() bool { return false } -// Does this type have x-databricks-id field? -func (e *Entity) HasIdentifierField() bool { - return e.IdentifierField() != nil -} - -// Return field with x-databricks-id -func (e *Entity) IdentifierField() *Field { - for _, v := range e.fields { - if v.Entity.IsIdentifier { - return &v - } - } - return nil -} - -// Does this type have x-databricks-name field? -func (e *Entity) HasNameField() bool { - for _, v := range e.fields { - if v.Entity.IsName { - return true - } - } - return false -} - -// Does this type have a single x-databricks-name field? 
-func (e *Entity) HasSingleNameField() bool { - count := 0 - for _, v := range e.fields { - if v.Entity.IsName { - count++ - } - } - return (count == 1) -} - // Enum returns all entries for enum entities func (e *Entity) Enum() (enum []EnumEntry) { for _, v := range e.enum { diff --git a/openapi/code/method.go b/openapi/code/method.go index 473457f8c..2f532ae12 100644 --- a/openapi/code/method.go +++ b/openapi/code/method.go @@ -23,10 +23,19 @@ type Method struct { // Response type representation Response *Entity EmptyResponseName Named - wait *openapi.Wait - pagination *openapi.Pagination - operation *openapi.Operation - shortcut bool + + // For list APIs, the path of fields in the response entity to follow to get + // the resource ID. + IdFieldPath []*Field + + // For list APIs, the path of fields in the response entity to follow to get + // the user-friendly name of the resource. + NameFieldPath []*Field + + wait *openapi.Wait + pagination *openapi.Pagination + operation *openapi.Operation + shortcut bool } // Shortcut holds definition of "shortcut" methods, that are generated for @@ -58,7 +67,7 @@ type Pagination struct { // drop-downs or any other selectors. 
type NamedIdMap struct { Named - Id *Field + IdPath []*Field NamePath []*Field Entity *Entity @@ -158,6 +167,21 @@ func (m *Method) IsJsonOnly() bool { return m.operation.JsonOnly } +func (m *Method) HasIdentifierField() bool { + return len(m.IdFieldPath) > 0 +} + +func (m *Method) IdentifierField() *Field { + if !m.HasIdentifierField() { + return nil + } + return m.IdFieldPath[len(m.IdFieldPath)-1] +} + +func (m *Method) HasNameField() bool { + return len(m.NameFieldPath) > 0 +} + // Wait returns definition for long-running operation func (m *Method) Wait() *Wait { if m.wait == nil { @@ -228,8 +252,9 @@ func (m *Method) paginationItem() *Entity { return p.Entity } -func (p *Pagination) NeedsOffsetDedupe() bool { - return p.Offset != nil && p.Entity.HasIdentifierField() +func (m *Method) NeedsOffsetDedupe() bool { + p := m.Pagination() + return p.Offset != nil && m.HasIdentifierField() } func (p *Pagination) MultiRequest() bool { @@ -240,66 +265,32 @@ func (p *Pagination) MultiRequest() bool { // entities of a type func (m *Method) NamedIdMap() *NamedIdMap { entity := m.paginationItem() - if entity == nil { - return nil - } - if !entity.HasIdentifierField() { - return nil - } - if !entity.HasSingleNameField() { - return nil - } - var id *Field - var namePath []*Field - for _, f := range entity.fields { - if f.Schema == nil { - continue - } - local := f - if f.Schema.IsIdentifier { - id = &local - } - if f.Entity.IsName { - namePath = append(namePath, &local) - if !f.Entity.IsObject() { - continue - } - if !f.Entity.HasNameField() { - continue - } - // job list: {"id": 1234, "settings": {"name": "..."}} - for _, innerField := range f.Entity.fields { - if innerField.Schema == nil { - continue - } - if innerField.Schema.IsName { - local2 := innerField - namePath = append(namePath, &local2) - } - } - } - } - if len(namePath) == 0 { + if entity == nil || !m.HasIdentifierField() || !m.HasNameField() { return nil } + namePath := m.NameFieldPath nameParts := 
[]string{entity.PascalName()} for _, v := range namePath { nameParts = append(nameParts, v.PascalName()) } nameParts = append(nameParts, "To") - nameParts = append(nameParts, id.PascalName()) + nameParts = append(nameParts, m.IdentifierField().PascalName()) nameParts = append(nameParts, "Map") return &NamedIdMap{ Named: Named{ Name: strings.Join(nameParts, ""), }, - Id: id, + IdPath: m.IdFieldPath, NamePath: namePath, Entity: entity, Direct: m.Response.ArrayValue != nil, } } +func (n *NamedIdMap) Id() *Field { + return n.IdPath[len(n.IdPath)-1] +} + // GetByName returns entity from the same service with x-databricks-crud:read func (m *Method) GetByName() *Entity { n := m.NamedIdMap() diff --git a/openapi/code/package.go b/openapi/code/package.go index b560f591e..b3e44bd94 100644 --- a/openapi/code/package.go +++ b/openapi/code/package.go @@ -153,8 +153,6 @@ func (pkg *Package) schemaToEntity(s *openapi.Schema, path []string, hasName boo } e.IsEmpty = s.IsEmpty() e.IsAny = s.IsAny || s.Type == "object" && s.IsEmpty() - e.IsIdentifier = s.IsIdentifier - e.IsName = s.IsName e.IsComputed = s.IsComputed e.RequiredOrder = s.Required // enum diff --git a/openapi/code/service.go b/openapi/code/service.go index 8a31ebbb4..f446c9824 100644 --- a/openapi/code/service.go +++ b/openapi/code/service.go @@ -256,6 +256,23 @@ func (svc *Service) newMethod(verb, path string, params []openapi.Parameter, op } description = fmt.Sprintf("%s\n\n%s", summary, description) } + + var nameFieldPath, idFieldPath []*Field + respEntity := getPaginationEntity(response, op.Pagination) + if op.HasNameField() && respEntity != nil { + nameField, err := respEntity.GetUnderlyingFields(op.NameField) + if err != nil { + panic(fmt.Errorf("[%s] could not find name field %q: %w", op.OperationId, op.NameField, err)) + } + nameFieldPath = nameField + } + if op.HasIdentifierField() && respEntity != nil { + idField, err := respEntity.GetUnderlyingFields(op.IdField) + if err != nil { + panic(fmt.Errorf("[%s] 
could not find id field %q: %w", op.OperationId, op.IdField, err)) + } + idFieldPath = idField + } return &Method{ Named: Named{name, description}, Service: svc, @@ -265,6 +282,8 @@ func (svc *Service) newMethod(verb, path string, params []openapi.Parameter, op PathParts: svc.paramPath(path, request, params), Response: response, EmptyResponseName: emptyResponse, + NameFieldPath: nameFieldPath, + IdFieldPath: idFieldPath, wait: op.Wait, operation: op, pagination: op.Pagination, @@ -309,3 +328,13 @@ func (svc *Service) IsPrivatePreview() bool { func (svc *Service) IsPublicPreview() bool { return isPublicPreview(&svc.tag.Node) } + +func getPaginationEntity(entity *Entity, pagination *openapi.Pagination) *Entity { + if pagination == nil { + return nil + } + if pagination.Inline { + return entity.ArrayValue + } + return entity.Field(pagination.Results).Entity.ArrayValue +} diff --git a/openapi/model.go b/openapi/model.go index bc5348098..b63c9a93d 100644 --- a/openapi/model.go +++ b/openapi/model.go @@ -85,14 +85,29 @@ func (path *Path) Verbs() map[string]*Operation { return m } +type fieldPath []string + +func (fp fieldPath) String() string { + return strings.Join(fp, ".") +} + // Operation is the equivalent of method type Operation struct { Node - Wait *Wait `json:"x-databricks-wait,omitempty"` - Pagination *Pagination `json:"x-databricks-pagination,omitempty"` - Shortcut bool `json:"x-databricks-shortcut,omitempty"` - Crud string `json:"x-databricks-crud,omitempty"` - JsonOnly bool `json:"x-databricks-cli-json-only,omitempty"` + Wait *Wait `json:"x-databricks-wait,omitempty"` + Pagination *Pagination `json:"x-databricks-pagination,omitempty"` + Shortcut bool `json:"x-databricks-shortcut,omitempty"` + Crud string `json:"x-databricks-crud,omitempty"` + JsonOnly bool `json:"x-databricks-cli-json-only,omitempty"` + + // For list APIs, the path to the field in the response entity that contains + // the resource ID. 
+ IdField fieldPath `json:"x-databricks-id,omitempty"` + + // For list APIs, the path to the field in the response entity that contains + // the user-friendly name of the resource. + NameField fieldPath `json:"x-databricks-name,omitempty"` + Summary string `json:"summary,omitempty"` OperationId string `json:"operationId"` Tags []string `json:"tags"` @@ -132,6 +147,14 @@ func (o *Operation) SuccessResponseSchema(c *Components) *Schema { return nil } +func (o *Operation) HasNameField() bool { + return len(o.NameField) > 0 +} + +func (o *Operation) HasIdentifierField() bool { + return len(o.IdField) > 0 +} + type node interface { IsRef() bool Component() string @@ -158,8 +181,6 @@ type Components struct { type Schema struct { Node - IsIdentifier bool `json:"x-databricks-id,omitempty"` - IsName bool `json:"x-databricks-name,omitempty"` IsComputed bool `json:"x-databricks-computed,omitempty"` IsAny bool `json:"x-databricks-any,omitempty"` Type string `json:"type,omitempty"` diff --git a/service/catalog/api.go b/service/catalog/api.go index c382804d4..d789c37a4 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -1833,6 +1833,17 @@ func (a *TablesAPI) ListSummariesAll(ctx context.Context, request ListSummariesR return results, nil } +// Update a table owner. +// +// Change the owner of the table. The caller must be the owner of the parent +// catalog, have the **USE_CATALOG** privilege on the parent catalog and be the +// owner of the parent schema, or be the owner of the table and have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. 
+func (a *TablesAPI) Update(ctx context.Context, request UpdateTableRequest) error { + return a.impl.Update(ctx, request) +} + func NewVolumes(client *client.DatabricksClient) *VolumesAPI { return &VolumesAPI{ impl: &volumesImpl{ diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 1d5de4107..60611f077 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -545,6 +545,12 @@ func (a *tablesImpl) ListSummaries(ctx context.Context, request ListSummariesReq return &listTableSummariesResponse, err } +func (a *tablesImpl) Update(ctx context.Context, request UpdateTableRequest) error { + path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v", request.FullName) + err := a.client.Do(ctx, http.MethodPatch, path, request, nil) + return err +} + // unexported type that holds implementations of just Volumes API methods type volumesImpl struct { client *client.DatabricksClient diff --git a/service/catalog/interface.go b/service/catalog/interface.go index 4d1f605a6..39b5f5e51 100755 --- a/service/catalog/interface.go +++ b/service/catalog/interface.go @@ -713,6 +713,15 @@ type TablesService interface { // // Use ListSummariesAll() to get all TableSummary instances, which will iterate over every result page. ListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) + + // Update a table owner. + // + // Change the owner of the table. The caller must be the owner of the parent + // catalog, have the **USE_CATALOG** privilege on the parent catalog and be + // the owner of the parent schema, or be the owner of the table and have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. 
+ Update(ctx context.Context, request UpdateTableRequest) error } // Volumes are a Unity Catalog (UC) capability for accessing, storing, diff --git a/service/catalog/model.go b/service/catalog/model.go index 2ab5a593e..138c6d36a 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -197,7 +197,7 @@ type ColumnInfo struct { } type ColumnMask struct { - // The full name of the column maks SQL UDF. + // The full name of the column mask SQL UDF. FunctionName string `json:"function_name,omitempty"` // The list of additional table columns to be passed as input to the column // mask function. The first arg of the mask function should be of the type @@ -2351,6 +2351,14 @@ type UpdateStorageCredential struct { SkipValidation bool `json:"skip_validation,omitempty"` } +// Update a table owner. +type UpdateTableRequest struct { + // Full name of the table. + FullName string `json:"-" url:"-"` + + Owner string `json:"owner,omitempty"` +} + type UpdateVolumeRequestContent struct { // The comment attached to the volume Comment string `json:"comment,omitempty"` diff --git a/service/compute/api.go b/service/compute/api.go index 323eee33d..9fa6be509 100755 --- a/service/compute/api.go +++ b/service/compute/api.go @@ -1872,16 +1872,26 @@ func (a *PolicyFamiliesAPI) Impl() PolicyFamiliesService { return a.impl } +// Get policy family information. +// +// Retrieve the information for an policy family based on its identifier. func (a *PolicyFamiliesAPI) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) { return a.impl.Get(ctx, request) } +// Get policy family information. +// +// Retrieve the information for an policy family based on its identifier. func (a *PolicyFamiliesAPI) GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error) { return a.impl.Get(ctx, GetPolicyFamilyRequest{ PolicyFamilyId: policyFamilyId, }) } +// List policy families. +// +// Retrieve a list of policy families. This API is paginated. 
+// // This method is generated by Databricks SDK Code Generator. func (a *PolicyFamiliesAPI) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { var results []PolicyFamily diff --git a/service/compute/interface.go b/service/compute/interface.go index d5e029c8b..f8433a1e3 100755 --- a/service/compute/interface.go +++ b/service/compute/interface.go @@ -504,8 +504,15 @@ type LibrariesService interface { // create cluster policies using a policy family. Cluster policies created using // a policy family inherit the policy family's policy definition. type PolicyFamiliesService interface { + + // Get policy family information. + // + // Retrieve the information for an policy family based on its identifier. Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) + // List policy families. + // + // Retrieve a list of policy families. This API is paginated. // // Use ListAll() to get all PolicyFamily instances, which will iterate over every result page. List(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) diff --git a/service/compute/model.go b/service/compute/model.go index 9f1f265a0..bde8f1bfb 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -19,13 +19,11 @@ type AddInstanceProfile struct { // The AWS ARN of the instance profile to register with Databricks. This // field is required. InstanceProfileArn string `json:"instance_profile_arn"` - // By default, Databricks validates that it has sufficient permissions to - // launch instances with the instance profile. This validation uses AWS - // dry-run mode for the RunInstances API. If validation fails with an error - // message that does not indicate an IAM related permission issue, (e.g. - // `Your requested instance type is not supported in your requested - // availability zone`), you can pass this flag to skip the validation and - // forcibly add the instance profile. 
+ // Boolean flag indicating whether the instance profile should only be used + // in credential passthrough scenarios. If true, it means the instance + // profile contains an meta IAM role which could assume a wide range of + // roles. Therefore it should always be used with authorization. This field + // is optional, the default value is `false`. IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"` // By default, Databricks validates that it has sufficient permissions to // launch instances with the instance profile. This validation uses AWS @@ -2132,6 +2130,7 @@ type GetInstancePoolRequest struct { InstancePoolId string `json:"-" url:"instance_pool_id"` } +// Get policy family information type GetPolicyFamilyRequest struct { PolicyFamilyId string `json:"-" url:"-"` } @@ -2525,13 +2524,11 @@ type InstanceProfile struct { // The AWS ARN of the instance profile to register with Databricks. This // field is required. InstanceProfileArn string `json:"instance_profile_arn"` - // By default, Databricks validates that it has sufficient permissions to - // launch instances with the instance profile. This validation uses AWS - // dry-run mode for the RunInstances API. If validation fails with an error - // message that does not indicate an IAM related permission issue, (e.g. - // `Your requested instance type is not supported in your requested - // availability zone`), you can pass this flag to skip the validation and - // forcibly add the instance profile. + // Boolean flag indicating whether the instance profile should only be used + // in credential passthrough scenarios. If true, it means the instance + // profile contains an meta IAM role which could assume a wide range of + // roles. Therefore it should always be used with authorization. This field + // is optional, the default value is `false`. 
IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"` } @@ -2703,6 +2700,7 @@ type ListPoliciesResponse struct { Policies []Policy `json:"policies,omitempty"` } +// List policy families type ListPolicyFamiliesRequest struct { // The max number of policy families to return. MaxResults int64 `json:"-" url:"max_results,omitempty"` diff --git a/service/iam/model.go b/service/iam/model.go index 006c47065..3d89f4c51 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -188,7 +188,7 @@ type Group struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks group ID - Id string `json:"id,omitempty" url:"-"` + Id string `json:"id,omitempty"` Members []ComplexValue `json:"members,omitempty"` // Container for the group identifier. Workspace local versus account. @@ -431,7 +431,10 @@ type PartialUpdate struct { // Unique ID for a user in the Databricks workspace. Id string `json:"-" url:"-"` - Operations []Patch `json:"operations,omitempty"` + Operations []Patch `json:"Operations,omitempty"` + // The schema of the patch request. Must be + // ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + Schema []PatchSchema `json:"schema,omitempty"` } type Patch struct { @@ -440,7 +443,7 @@ type Patch struct { // Selection of patch operation Path string `json:"path,omitempty"` // Value to modify - Value string `json:"value,omitempty"` + Value any `json:"value,omitempty"` } // Type of patch operation. 
@@ -473,6 +476,31 @@ func (f *PatchOp) Type() string { return "PatchOp" } +type PatchSchema string + +const PatchSchemaUrnIetfParamsScimApiMessagesPatchop PatchSchema = `urn:ietf:params:scim:api:messages:2.0:PatchOp` + +// String representation for [fmt.Print] +func (f *PatchSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PatchSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:api:messages:2.0:PatchOp`: + *f = PatchSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:api:messages:2.0:PatchOp"`, v) + } +} + +// Type always returns PatchSchema to satisfy [pflag.Value] interface +func (f *PatchSchema) Type() string { + return "PatchSchema" +} + type Permission struct { Inherited bool `json:"inherited,omitempty"` diff --git a/service/jobs/model.go b/service/jobs/model.go index 50eb5024e..0bd746b36 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -118,6 +118,8 @@ type BaseRun struct { Tasks []RunTask `json:"tasks,omitempty"` // This describes an enum Trigger TriggerType `json:"trigger,omitempty"` + + TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` } type CancelAllRuns struct { @@ -536,6 +538,9 @@ type GitSource struct { // URL of the repository to be cloned by this job. The maximum length is 300 // characters. GitUrl string `json:"git_url"` + // The source of the job specification in the remote repository when the job + // is source controlled. + JobSource *JobSource `json:"job_source,omitempty"` } type Job struct { @@ -722,6 +727,50 @@ type JobSettings struct { WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` } +// The source of the job specification in the remote repository when the job is +// source controlled. 
+type JobSource struct { + // This describes an enum + DirtyState JobSourceDirtyState `json:"dirty_state,omitempty"` + // Name of the branch which the job is imported from. + ImportFromGitBranch string `json:"import_from_git_branch"` + // Path of the job YAML file that contains the job specification. + JobConfigPath string `json:"job_config_path"` +} + +// This describes an enum +type JobSourceDirtyState string + +// The job is temporary disconnected from the remote job specification and is +// allowed for live edit. Import the remote job specification again from UI to +// make the job fully synced. +const JobSourceDirtyStateDisconnected JobSourceDirtyState = `DISCONNECTED` + +// The job is not yet synced with the remote job specification. Import the +// remote job specification from UI to make the job fully synced. +const JobSourceDirtyStateNotSynced JobSourceDirtyState = `NOT_SYNCED` + +// String representation for [fmt.Print] +func (f *JobSourceDirtyState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobSourceDirtyState) Set(v string) error { + switch v { + case `DISCONNECTED`, `NOT_SYNCED`: + *f = JobSourceDirtyState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "NOT_SYNCED"`, v) + } +} + +// Type always returns JobSourceDirtyState to satisfy [pflag.Value] interface +func (f *JobSourceDirtyState) Type() string { + return "JobSourceDirtyState" +} + // List jobs type ListJobsRequest struct { // Whether to include task and cluster details in the response. 
@@ -1258,6 +1307,8 @@ type Run struct { Tasks []RunTask `json:"tasks,omitempty"` // This describes an enum Trigger TriggerType `json:"trigger,omitempty"` + + TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` } type RunConditionTask struct { @@ -1350,6 +1401,18 @@ func (f *RunIf) Type() string { return "RunIf" } +type RunJobOutput struct { + // The run id of the triggered job run + RunId int `json:"run_id,omitempty"` +} + +type RunJobTask struct { + // ID of the job to trigger. + JobId int `json:"job_id"` + // Job-level parameters used to trigger the job. + JobParameters any `json:"job_parameters,omitempty"` +} + // This describes an enum type RunLifeCycleState string @@ -1551,6 +1614,8 @@ type RunOutput struct { // [ClusterLogConf](/dev-tools/api/latest/clusters.html#clusterlogconf) // field to configure log storage for the job cluster. NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` + // The output of a run job task, if available + RunJobOutput *RunJobOutput `json:"run_job_output,omitempty"` // The output of a SQL task, if available. SqlOutput *SqlOutput `json:"sql_output,omitempty"` } @@ -1782,6 +1847,8 @@ type RunTask struct { // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf RunIf `json:"run_if,omitempty"` + // If run_job_task, indicates that this task must execute another job. + RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -2287,6 +2354,8 @@ type Task struct { // executed * `AT_LEAST_ONE_FAILED`: At least one dependency failed * // `ALL_FAILED`: ALl dependencies have failed RunIf RunIf `json:"run_if,omitempty"` + // If run_job_task, indicates that this task must execute another job. 
+ RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // If spark_jar_task, indicates that this task must run a JAR. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` // If spark_python_task, indicates that this task must run a Python file. @@ -2366,6 +2435,11 @@ type TriggerHistory struct { LastTriggered *TriggerEvaluation `json:"last_triggered,omitempty"` } +type TriggerInfo struct { + // The run id of the Run Job task run + RunId int `json:"run_id,omitempty"` +} + type TriggerSettings struct { // File arrival trigger settings. FileArrival *FileArrivalTriggerConfiguration `json:"file_arrival,omitempty"` @@ -2390,6 +2464,9 @@ const TriggerTypePeriodic TriggerType = `PERIODIC` // occurs when you request to re-run the job in case of failures. const TriggerTypeRetry TriggerType = `RETRY` +// Indicates a run that is triggered using a Run Job task. +const TriggerTypeRunJobTask TriggerType = `RUN_JOB_TASK` + // String representation for [fmt.Print] func (f *TriggerType) String() string { return string(*f) @@ -2398,11 +2475,11 @@ func (f *TriggerType) String() string { // Set raw string value and validate it against allowed values func (f *TriggerType) Set(v string) error { switch v { - case `FILE_ARRIVAL`, `ONE_TIME`, `PERIODIC`, `RETRY`: + case `FILE_ARRIVAL`, `ONE_TIME`, `PERIODIC`, `RETRY`, `RUN_JOB_TASK`: *f = TriggerType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "FILE_ARRIVAL", "ONE_TIME", "PERIODIC", "RETRY"`, v) + return fmt.Errorf(`value "%s" is not one of "FILE_ARRIVAL", "ONE_TIME", "PERIODIC", "RETRY", "RUN_JOB_TASK"`, v) } } diff --git a/service/ml/model_registry_usage_test.go b/service/ml/model_registry_usage_test.go index 393ab2811..b5ccf9028 100755 --- a/service/ml/model_registry_usage_test.go +++ b/service/ml/model_registry_usage_test.go @@ -58,20 +58,20 @@ func ExampleModelRegistryAPI_CreateComment_modelVersionComments() { } -func ExampleModelRegistryAPI_CreateModel_modelVersionComments() { +func 
ExampleModelRegistryAPI_CreateModel_models() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } - model, err := w.ModelRegistry.CreateModel(ctx, ml.CreateModelRequest{ + created, err := w.ModelRegistry.CreateModel(ctx, ml.CreateModelRequest{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }) if err != nil { panic(err) } - logger.Infof(ctx, "found %v", model) + logger.Infof(ctx, "found %v", created) } @@ -92,24 +92,24 @@ func ExampleModelRegistryAPI_CreateModel_modelVersions() { } -func ExampleModelRegistryAPI_CreateModel_models() { +func ExampleModelRegistryAPI_CreateModel_modelVersionComments() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } - created, err := w.ModelRegistry.CreateModel(ctx, ml.CreateModelRequest{ + model, err := w.ModelRegistry.CreateModel(ctx, ml.CreateModelRequest{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }) if err != nil { panic(err) } - logger.Infof(ctx, "found %v", created) + logger.Infof(ctx, "found %v", model) } -func ExampleModelRegistryAPI_CreateModelVersion_modelVersions() { +func ExampleModelRegistryAPI_CreateModelVersion_modelVersionComments() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { @@ -124,18 +124,18 @@ func ExampleModelRegistryAPI_CreateModelVersion_modelVersions() { } logger.Infof(ctx, "found %v", model) - created, err := w.ModelRegistry.CreateModelVersion(ctx, ml.CreateModelVersionRequest{ + mv, err := w.ModelRegistry.CreateModelVersion(ctx, ml.CreateModelVersionRequest{ Name: model.RegisteredModel.Name, Source: "dbfs:/tmp", }) if err != nil { panic(err) } - logger.Infof(ctx, "found %v", created) + logger.Infof(ctx, "found %v", mv) } -func ExampleModelRegistryAPI_CreateModelVersion_modelVersionComments() { +func ExampleModelRegistryAPI_CreateModelVersion_modelVersions() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { @@ -150,14 
+150,14 @@ func ExampleModelRegistryAPI_CreateModelVersion_modelVersionComments() { } logger.Infof(ctx, "found %v", model) - mv, err := w.ModelRegistry.CreateModelVersion(ctx, ml.CreateModelVersionRequest{ + created, err := w.ModelRegistry.CreateModelVersion(ctx, ml.CreateModelVersionRequest{ Name: model.RegisteredModel.Name, Source: "dbfs:/tmp", }) if err != nil { panic(err) } - logger.Infof(ctx, "found %v", mv) + logger.Infof(ctx, "found %v", created) } diff --git a/service/pkg.go b/service/pkg.go index cacd2490a..26ab862bd 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -12,6 +12,8 @@ // // - [catalog.CatalogsAPI]: A catalog is the first layer of Unity Catalog’s three-level namespace. // +// - [sharing.CleanRoomsAPI]: A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive enterprise data, including customer data, for measurements, insights, activation and other use cases. +// // - [compute.ClusterPoliciesAPI]: Cluster policy limits the ability to configure clusters based on a set of rules. // // - [compute.ClustersAPI]: The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. @@ -112,7 +114,7 @@ // // - [serving.ServingEndpointsAPI]: The Serving Endpoints API allows you to create, update, and delete model serving endpoints. // -// - [settings.AccountSettingsAPI]: TBD. +// - [settings.AccountSettingsAPI]: The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources. // // - [sharing.SharesAPI]: Databricks Shares REST API. 
// @@ -184,6 +186,7 @@ var ( _ *billing.BillableUsageAPI = nil _ *billing.BudgetsAPI = nil _ *catalog.CatalogsAPI = nil + _ *sharing.CleanRoomsAPI = nil _ *compute.ClusterPoliciesAPI = nil _ *compute.ClustersAPI = nil _ *compute.CommandExecutionAPI = nil diff --git a/service/settings/api.go b/service/settings/api.go index 049af0fe5..06dea0575 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -222,7 +222,15 @@ func NewAccountSettings(client *client.DatabricksClient) *AccountSettingsAPI { } } -// TBD +// The Personal Compute enablement setting lets you control which users can use +// the Personal Compute default policy to create compute resources. By default +// all users in all workspaces have access (ON), but you can change the setting +// to instead let individual workspaces configure access control (DELEGATE). +// +// There is only one instance of this setting per account. Since this setting +// has a default value, this setting is present on all accounts even though it's +// never set on a given account. Deletion reverts the value of the setting back +// to the default value. type AccountSettingsAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(AccountSettingsService) @@ -243,21 +251,21 @@ func (a *AccountSettingsAPI) Impl() AccountSettingsService { // Delete Personal Compute setting. // -// TBD +// Reverts back the Personal Compute setting value to default (ON) func (a *AccountSettingsAPI) DeletePersonalComputeSetting(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) { return a.impl.DeletePersonalComputeSetting(ctx, request) } // Get Personal Compute setting. // -// TBD +// Gets the value of the Personal Compute setting. 
func (a *AccountSettingsAPI) ReadPersonalComputeSetting(ctx context.Context, request ReadPersonalComputeSettingRequest) (*PersonalComputeSetting, error) { return a.impl.ReadPersonalComputeSetting(ctx, request) } // Update Personal Compute setting. // -// TBD +// Updates the value of the Personal Compute setting. func (a *AccountSettingsAPI) UpdatePersonalComputeSetting(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) { return a.impl.UpdatePersonalComputeSetting(ctx, request) } diff --git a/service/settings/interface.go b/service/settings/interface.go index 7b0fe9dcd..c3ea2e2fe 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -103,22 +103,30 @@ type AccountIpAccessListsService interface { Update(ctx context.Context, request UpdateIpAccessList) error } -// TBD +// The Personal Compute enablement setting lets you control which users can use +// the Personal Compute default policy to create compute resources. By default +// all users in all workspaces have access (ON), but you can change the setting +// to instead let individual workspaces configure access control (DELEGATE). +// +// There is only one instance of this setting per account. Since this setting +// has a default value, this setting is present on all accounts even though it's +// never set on a given account. Deletion reverts the value of the setting back +// to the default value. type AccountSettingsService interface { // Delete Personal Compute setting. // - // TBD + // Reverts back the Personal Compute setting value to default (ON) DeletePersonalComputeSetting(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) // Get Personal Compute setting. // - // TBD + // Gets the value of the Personal Compute setting. 
ReadPersonalComputeSetting(ctx context.Context, request ReadPersonalComputeSettingRequest) (*PersonalComputeSetting, error) // Update Personal Compute setting. // - // TBD + // Updates the value of the Personal Compute setting. UpdatePersonalComputeSetting(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) } diff --git a/service/settings/model.go b/service/settings/model.go index 9abc1ddd8..7a1fa0013 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -64,12 +64,24 @@ type DeleteIpAccessListRequest struct { // Delete Personal Compute setting type DeletePersonalComputeSettingRequest struct { - // TBD - Etag string `json:"-" url:"etag,omitempty"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag"` } type DeletePersonalComputeSettingResponse struct { - // TBD + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. 
Etag string `json:"etag"` } @@ -181,11 +193,22 @@ func (f *ListType) Type() string { } type PersonalComputeMessage struct { - // TBD + // ON: Grants all users in all workspaces access to the Personal Compute + // default policy, allowing all users to create single-machine compute + // resources. DELEGATE: Moves access control for the Personal Compute + // default policy to individual workspaces and requires a workspace’s + // users or groups to be added to the ACLs of that workspace’s Personal + // Compute default policy before they will be able to create compute + // resources through that policy. Value PersonalComputeMessageEnum `json:"value"` } -// TBD +// ON: Grants all users in all workspaces access to the Personal Compute default +// policy, allowing all users to create single-machine compute resources. +// DELEGATE: Moves access control for the Personal Compute default policy to +// individual workspaces and requires a workspace’s users or groups to be +// added to the ACLs of that workspace’s Personal Compute default policy +// before they will be able to create compute resources through that policy. type PersonalComputeMessageEnum string const PersonalComputeMessageEnumDelegate PersonalComputeMessageEnum = `DELEGATE` @@ -214,12 +237,18 @@ func (f *PersonalComputeMessageEnum) Type() string { } type PersonalComputeSetting struct { - // TBD + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. 
Etag string `json:"etag,omitempty"` PersonalCompute PersonalComputeMessage `json:"personal_compute"` - // Name of the corresponding setting. Needs to be 'default' if the setting - // is a singleton. + // Name of the corresponding setting. Needs to be 'default' if there is only + // one setting instance per account. SettingName string `json:"setting_name,omitempty"` } @@ -237,8 +266,14 @@ type PublicTokenInfo struct { // Get Personal Compute setting type ReadPersonalComputeSettingRequest struct { - // TBD - Etag string `json:"-" url:"etag,omitempty"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag"` } type ReplaceIpAccessList struct { @@ -296,7 +331,8 @@ type UpdateIpAccessList struct { // Update Personal Compute setting type UpdatePersonalComputeSettingRequest struct { - // TBD + // This should always be set to true for Settings RPCs. Added for AIP + // compliance. AllowMissing bool `json:"allow_missing,omitempty"` Setting *PersonalComputeSetting `json:"setting,omitempty"` diff --git a/service/sharing/api.go b/service/sharing/api.go index 25fa40441..23a402cc5 100755 --- a/service/sharing/api.go +++ b/service/sharing/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Providers, Recipient Activation, Recipients, Shares, etc. +// These APIs allow you to manage Clean Rooms, Providers, Recipient Activation, Recipients, Shares, etc. 
 package sharing
 
 import (
@@ -12,6 +12,118 @@ import (
 	"github.com/databricks/databricks-sdk-go/useragent"
 )
 
+func NewCleanRooms(client *client.DatabricksClient) *CleanRoomsAPI {
+	return &CleanRoomsAPI{
+		impl: &cleanRoomsImpl{
+			client: client,
+		},
+	}
+}
+
+// A clean room is a secure, privacy-protecting environment where two or more
+// parties can share sensitive enterprise data, including customer data, for
+// measurements, insights, activation and other use cases.
+//
+// To create clean rooms, you must be a metastore admin or a user with the
+// **CREATE_CLEAN_ROOM** privilege.
+type CleanRoomsAPI struct {
+	// impl contains low-level REST API interface, that could be overridden
+	// through WithImpl(CleanRoomsService)
+	impl CleanRoomsService
+}
+
+// WithImpl could be used to override low-level API implementations for unit
+// testing purposes with [github.com/golang/mock] or other mocking frameworks.
+func (a *CleanRoomsAPI) WithImpl(impl CleanRoomsService) *CleanRoomsAPI {
+	a.impl = impl
+	return a
+}
+
+// Impl returns low-level CleanRooms API implementation
+func (a *CleanRoomsAPI) Impl() CleanRoomsService {
+	return a.impl
+}
+
+// Create a clean room.
+//
+// Creates a new clean room with specified collaborators. The caller must be a
+// metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore.
+func (a *CleanRoomsAPI) Create(ctx context.Context, request CreateCleanRoom) (*CleanRoomInfo, error) {
+	return a.impl.Create(ctx, request)
+}
+
+// Delete a clean room.
+//
+// Deletes a data object clean room from the metastore. The caller must be an
+// owner of the clean room.
+func (a *CleanRoomsAPI) Delete(ctx context.Context, request DeleteCleanRoomRequest) error {
+	return a.impl.Delete(ctx, request)
+}
+
+// Delete a clean room.
+//
+// Deletes a data object clean room from the metastore. The caller must be an
+// owner of the clean room.
+func (a *CleanRoomsAPI) DeleteByNameArg(ctx context.Context, nameArg string) error { + return a.impl.Delete(ctx, DeleteCleanRoomRequest{ + NameArg: nameArg, + }) +} + +// Get a clean room. +// +// Gets a data object clean room from the metastore. The caller must be a +// metastore admin or the owner of the clean room. +func (a *CleanRoomsAPI) Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoomInfo, error) { + return a.impl.Get(ctx, request) +} + +// Get a clean room. +// +// Gets a data object clean room from the metastore. The caller must be a +// metastore admin or the owner of the clean room. +func (a *CleanRoomsAPI) GetByNameArg(ctx context.Context, nameArg string) (*CleanRoomInfo, error) { + return a.impl.Get(ctx, GetCleanRoomRequest{ + NameArg: nameArg, + }) +} + +// List clean rooms. +// +// Gets an array of data object clean rooms from the metastore. The caller must +// be a metastore admin or the owner of the clean room. There is no guarantee of +// a specific ordering of the elements in the array. +// +// This method is generated by Databricks SDK Code Generator. +func (a *CleanRoomsAPI) ListAll(ctx context.Context) ([]CleanRoomInfo, error) { + response, err := a.impl.List(ctx) + if err != nil { + return nil, err + } + return response.CleanRooms, nil +} + +// Update a clean room. +// +// Updates the clean room with the changes and data objects in the request. The +// caller must be the owner of the clean room or a metastore admin. +// +// When the caller is a metastore admin, only the __owner__ field can be +// updated. +// +// In the case that the clean room name is changed **updateCleanRoom** requires +// that the caller is both the clean room owner and a metastore admin. +// +// For each table that is added through this method, the clean room owner must +// also have **SELECT** privilege on the table. The privilege must be maintained +// indefinitely for recipients to be able to access the table. 
Typically, you +// should use a group as the clean room owner. +// +// Table removals through **update** do not require additional privileges. +func (a *CleanRoomsAPI) Update(ctx context.Context, request UpdateCleanRoom) (*CleanRoomInfo, error) { + return a.impl.Update(ctx, request) +} + func NewProviders(client *client.DatabricksClient) *ProvidersAPI { return &ProvidersAPI{ impl: &providersImpl{ diff --git a/service/sharing/impl.go b/service/sharing/impl.go index 2a0772927..beee859e1 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -12,6 +12,45 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" ) +// unexported type that holds implementations of just CleanRooms API methods +type cleanRoomsImpl struct { + client *client.DatabricksClient +} + +func (a *cleanRoomsImpl) Create(ctx context.Context, request CreateCleanRoom) (*CleanRoomInfo, error) { + var cleanRoomInfo CleanRoomInfo + path := "/api/2.1/unity-catalog/clean-rooms" + err := a.client.Do(ctx, http.MethodPost, path, request, &cleanRoomInfo) + return &cleanRoomInfo, err +} + +func (a *cleanRoomsImpl) Delete(ctx context.Context, request DeleteCleanRoomRequest) error { + path := fmt.Sprintf("/api/2.1/unity-catalog/clean-rooms/%v", request.NameArg) + err := a.client.Do(ctx, http.MethodDelete, path, request, nil) + return err +} + +func (a *cleanRoomsImpl) Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoomInfo, error) { + var cleanRoomInfo CleanRoomInfo + path := fmt.Sprintf("/api/2.1/unity-catalog/clean-rooms/%v", request.NameArg) + err := a.client.Do(ctx, http.MethodGet, path, request, &cleanRoomInfo) + return &cleanRoomInfo, err +} + +func (a *cleanRoomsImpl) List(ctx context.Context) (*ListCleanRoomsResponse, error) { + var listCleanRoomsResponse ListCleanRoomsResponse + path := "/api/2.1/unity-catalog/clean-rooms" + err := a.client.Do(ctx, http.MethodGet, path, nil, &listCleanRoomsResponse) + return &listCleanRoomsResponse, err +} + +func (a 
*cleanRoomsImpl) Update(ctx context.Context, request UpdateCleanRoom) (*CleanRoomInfo, error) {
+	var cleanRoomInfo CleanRoomInfo
+	path := fmt.Sprintf("/api/2.1/unity-catalog/clean-rooms/%v", request.NameArg)
+	err := a.client.Do(ctx, http.MethodPatch, path, request, &cleanRoomInfo)
+	return &cleanRoomInfo, err
+}
+
 // unexported type that holds implementations of just Providers API methods
 type providersImpl struct {
 	client *client.DatabricksClient
diff --git a/service/sharing/interface.go b/service/sharing/interface.go
index 91917a2ae..f7e5e6ab6 100755
--- a/service/sharing/interface.go
+++ b/service/sharing/interface.go
@@ -8,6 +8,63 @@ import (
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 )
 
+// A clean room is a secure, privacy-protecting environment where two or more
+// parties can share sensitive enterprise data, including customer data, for
+// measurements, insights, activation and other use cases.
+//
+// To create clean rooms, you must be a metastore admin or a user with the
+// **CREATE_CLEAN_ROOM** privilege.
+type CleanRoomsService interface {
+
+	// Create a clean room.
+	//
+	// Creates a new clean room with specified collaborators. The caller must
+	// be a metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the
+	// metastore.
+	Create(ctx context.Context, request CreateCleanRoom) (*CleanRoomInfo, error)
+
+	// Delete a clean room.
+	//
+	// Deletes a data object clean room from the metastore. The caller must be
+	// an owner of the clean room.
+	Delete(ctx context.Context, request DeleteCleanRoomRequest) error
+
+	// Get a clean room.
+	//
+	// Gets a data object clean room from the metastore. The caller must be a
+	// metastore admin or the owner of the clean room.
+	Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoomInfo, error)
+
+	// List clean rooms.
+	//
+	// Gets an array of data object clean rooms from the metastore. The caller
+	// must be a metastore admin or the owner of the clean room. 
There is no + // guarantee of a specific ordering of the elements in the array. + // + // Use ListAll() to get all CleanRoomInfo instances + List(ctx context.Context) (*ListCleanRoomsResponse, error) + + // Update a clean room. + // + // Updates the clean room with the changes and data objects in the request. + // The caller must be the owner of the clean room or a metastore admin. + // + // When the caller is a metastore admin, only the __owner__ field can be + // updated. + // + // In the case that the clean room name is changed **updateCleanRoom** + // requires that the caller is both the clean room owner and a metastore + // admin. + // + // For each table that is added through this method, the clean room owner + // must also have **SELECT** privilege on the table. The privilege must be + // maintained indefinitely for recipients to be able to access the table. + // Typically, you should use a group as the clean room owner. + // + // Table removals through **update** do not require additional privileges. + Update(ctx context.Context, request UpdateCleanRoom) (*CleanRoomInfo, error) +} + // Databricks Providers REST API type ProvidersService interface { diff --git a/service/sharing/model.go b/service/sharing/model.go index 3d9e9ce20..dbadb6a3f 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -38,6 +38,214 @@ func (f *AuthenticationType) Type() string { return "AuthenticationType" } +type CentralCleanRoomInfo struct { + // All assets from all collaborators that are available in the clean room. + // Only one of table_info or notebook_info will be filled in. + CleanRoomAssets []CleanRoomAssetInfo `json:"clean_room_assets,omitempty"` + // All collaborators who are in the clean room. + Collaborators []CleanRoomCollaboratorInfo `json:"collaborators,omitempty"` + // The collaborator who created the clean room. + Creator *CleanRoomCollaboratorInfo `json:"creator,omitempty"` + // The cloud where clean room tasks will be run. 
+ StationCloud string `json:"station_cloud,omitempty"` + // The region where clean room tasks will be run. + StationRegion string `json:"station_region,omitempty"` +} + +type CleanRoomAssetInfo struct { + // Time at which this asset was added, in epoch milliseconds. + AddedAt int64 `json:"added_at,omitempty"` + // Details about the notebook asset. + NotebookInfo *CleanRoomNotebookInfo `json:"notebook_info,omitempty"` + // The collaborator who owns the asset. + Owner *CleanRoomCollaboratorInfo `json:"owner,omitempty"` + // Details about the table asset. + TableInfo *CleanRoomTableInfo `json:"table_info,omitempty"` + // Time at which this asset was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` +} + +type CleanRoomCatalog struct { + // Name of the catalog in the clean room station. Empty for notebooks. + CatalogName string `json:"catalog_name,omitempty"` + // The details of the shared notebook files. + NotebookFiles []SharedDataObject `json:"notebook_files,omitempty"` + // The details of the shared tables. + Tables []SharedDataObject `json:"tables,omitempty"` +} + +type CleanRoomCatalogUpdate struct { + // The name of the catalog to update assets. + CatalogName string `json:"catalog_name,omitempty"` + // The updates to the assets in the catalog. + Updates *SharedDataObjectUpdate `json:"updates,omitempty"` +} + +type CleanRoomCollaboratorInfo struct { + // The global Unity Catalog metastore id of the collaborator. Also known as + // the sharing identifier. The identifier is of format + // __cloud__:__region__:__metastore-uuid__. + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + // The organization name of the collaborator. This is configured in the + // metastore for Delta Sharing and is used to identify the organization to + // other collaborators. + OrganizationName string `json:"organization_name,omitempty"` +} + +type CleanRoomInfo struct { + // User-provided free-form text description. 
+ Comment string `json:"comment,omitempty"` + // Time at which this clean room was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of clean room creator. + CreatedBy string `json:"created_by,omitempty"` + // Catalog aliases shared by the current collaborator with asset details. + LocalCatalogs []CleanRoomCatalog `json:"local_catalogs,omitempty"` + // Name of the clean room. + Name string `json:"name,omitempty"` + // Username of current owner of clean room. + Owner string `json:"owner,omitempty"` + // Central clean room details. + RemoteDetailedInfo *CentralCleanRoomInfo `json:"remote_detailed_info,omitempty"` + // Time at which this clean room was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of clean room updater. + UpdatedBy string `json:"updated_by,omitempty"` +} + +type CleanRoomNotebookInfo struct { + // The base64 representation of the notebook content in HTML. + NotebookContent string `json:"notebook_content,omitempty"` + // The name of the notebook. + NotebookName string `json:"notebook_name,omitempty"` +} + +type CleanRoomTableInfo struct { + // Name of parent catalog. + CatalogName string `json:"catalog_name,omitempty"` + // The array of __ColumnInfo__ definitions of the table's columns. + Columns []ColumnInfo `json:"columns,omitempty"` + // Full name of table, in form of + // __catalog_name__.__schema_name__.__table_name__ + FullName string `json:"full_name,omitempty"` + // Name of table, relative to parent schema. + Name string `json:"name,omitempty"` + // Name of parent schema relative to its parent catalog. + SchemaName string `json:"schema_name,omitempty"` +} + +type ColumnInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + + Mask *ColumnMask `json:"mask,omitempty"` + // Name of Column. + Name string `json:"name,omitempty"` + // Whether field may be Null (default: true). 
+ Nullable bool `json:"nullable,omitempty"` + // Partition index for column. + PartitionIndex int `json:"partition_index,omitempty"` + // Ordinal position of column (starting at position 0). + Position int `json:"position,omitempty"` + // Format of IntervalType. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // Full data type specification, JSON-serialized. + TypeJson string `json:"type_json,omitempty"` + // Name of type (INT, STRUCT, MAP, etc.). + TypeName ColumnTypeName `json:"type_name,omitempty"` + // Digits of precision; required for DecimalTypes. + TypePrecision int `json:"type_precision,omitempty"` + // Digits to right of decimal; Required for DecimalTypes. + TypeScale int `json:"type_scale,omitempty"` + // Full data type specification as SQL/catalogString text. + TypeText string `json:"type_text,omitempty"` +} + +type ColumnMask struct { + // The full name of the column mask SQL UDF. + FunctionName string `json:"function_name,omitempty"` + // The list of additional table columns to be passed as input to the column + // mask function. The first arg of the mask function should be of the type + // of the column being masked and the types of the rest of the args should + // match the types of columns in 'using_column_names'. + UsingColumnNames []string `json:"using_column_names,omitempty"` +} + +// Name of type (INT, STRUCT, MAP, etc.). 
+type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", 
"USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnTypeName to satisfy [pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + +type CreateCleanRoom struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Name of the clean room. + Name string `json:"name"` + // Central clean room details. + RemoteDetailedInfo CentralCleanRoomInfo `json:"remote_detailed_info"` +} + type CreateProvider struct { // The delta sharing authentication type. AuthenticationType AuthenticationType `json:"authentication_type"` @@ -80,6 +288,12 @@ type CreateShare struct { Name string `json:"name"` } +// Delete a clean room +type DeleteCleanRoomRequest struct { + // The name of the clean room. + NameArg string `json:"-" url:"-"` +} + // Delete a provider type DeleteProviderRequest struct { // Name of the provider. @@ -104,6 +318,14 @@ type GetActivationUrlInfoRequest struct { ActivationUrl string `json:"-" url:"-"` } +// Get a clean room +type GetCleanRoomRequest struct { + // Whether to include remote details (central) on the clean room. + IncludeRemoteDetails bool `json:"-" url:"include_remote_details,omitempty"` + // The name of the clean room. + NameArg string `json:"-" url:"-"` +} + // Get a provider type GetProviderRequest struct { // Name of the provider. @@ -134,6 +356,11 @@ type IpAccessList struct { AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"` } +type ListCleanRoomsResponse struct { + // An array of clean rooms. Remote details (central) are not included. + CleanRooms []CleanRoomInfo `json:"clean_rooms,omitempty"` +} + type ListProviderSharesResponse struct { // An array of provider shares. Shares []ProviderShare `json:"shares,omitempty"` @@ -635,6 +862,19 @@ func (f *SharedDataObjectUpdateAction) Type() string { return "SharedDataObjectUpdateAction" } +type UpdateCleanRoom struct { + // Array of shared data object updates. 
+ CatalogUpdates []CleanRoomCatalogUpdate `json:"catalog_updates,omitempty"` + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Name of the clean room. + Name string `json:"name,omitempty"` + // The name of the clean room. + NameArg string `json:"-" url:"-"` + // Username of current owner of clean room. + Owner string `json:"owner,omitempty"` +} + type UpdateProvider struct { // Description about the provider. Comment string `json:"comment,omitempty"` diff --git a/service/sql/model.go b/service/sql/model.go index 43e31161c..dd317a1e2 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -17,7 +17,7 @@ type AccessControl struct { type Alert struct { // Timestamp when the alert was created. CreatedAt string `json:"created_at,omitempty"` - // ID of the alert. + // Alert ID. Id string `json:"id,omitempty"` // Timestamp when the alert was last triggered. LastTriggeredAt string `json:"last_triggered_at,omitempty"` @@ -25,11 +25,10 @@ type Alert struct { Name string `json:"name,omitempty"` // Alert configuration options. Options *AlertOptions `json:"options,omitempty"` - // The identifier of the parent folder containing the alert. Available for - // alerts in workspace. + // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` - Query *Query `json:"query,omitempty"` + Query *AlertQuery `json:"query,omitempty"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered // again. @@ -66,7 +65,46 @@ type AlertOptions struct { // `!=` Op string `json:"op"` // Value used to compare in alert evaluation. - Value string `json:"value"` + Value any `json:"value"` +} + +type AlertQuery struct { + // The timestamp when this query was created. + CreatedAt string `json:"created_at,omitempty"` + // Data source ID. 
+ DataSourceId string `json:"data_source_id,omitempty"` + // General description that conveys additional information about this query + // such as usage notes. + Description string `json:"description,omitempty"` + // Query ID. + Id string `json:"id,omitempty"` + // Indicates whether the query is trashed. Trashed queries can't be used in + // dashboards, or appear in search results. If this boolean is `true`, the + // `options` property for this query includes a `moved_to_trash_at` + // timestamp. Trashed queries are permanently deleted after 30 days. + IsArchived bool `json:"is_archived,omitempty"` + // Whether the query is a draft. Draft queries only appear in list views for + // their owners. Visualizations from draft queries cannot appear on + // dashboards. + IsDraft bool `json:"is_draft,omitempty"` + // Text parameter types are not safe from SQL injection for all types of + // data source. Set this Boolean parameter to `true` if a query either does + // not use any text type parameters or uses a data source type where text + // type parameters are handled safely. + IsSafe bool `json:"is_safe,omitempty"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name string `json:"name,omitempty"` + + Options *QueryOptions `json:"options,omitempty"` + // The text of the query to be run. + Query string `json:"query,omitempty"` + + Tags []string `json:"tags,omitempty"` + // The timestamp at which this query was last updated. + UpdatedAt string `json:"updated_at,omitempty"` + // The ID of the user who created this query. + UserId int `json:"user_id,omitempty"` } // State of the alert. Possible values are: `unknown` (yet to be evaluated), @@ -257,10 +295,9 @@ type CreateAlert struct { Name string `json:"name"` // Alert configuration options. Options AlertOptions `json:"options"` - // The identifier of the workspace folder containing the alert. The default - // is ther user's home folder. 
+ // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` - // ID of the query evaluated by the alert. + // Query ID. QueryId string `json:"query_id"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered @@ -277,8 +314,7 @@ type CreateDashboardRequest struct { // The title of this dashboard that appears in list views and at the top of // the dashboard page. Name string `json:"name,omitempty"` - // The identifier of the workspace folder containing the dashboard. The - // default is the user's home folder. + // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` Tags []string `json:"tags,omitempty"` @@ -412,8 +448,7 @@ type Dashboard struct { Name string `json:"name,omitempty"` Options *DashboardOptions `json:"options,omitempty"` - // The identifier of the parent folder containing the dashboard. Available - // for dashboards in workspace. + // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` // This describes an enum PermissionTier PermissionLevel `json:"permission_tier,omitempty"` @@ -441,8 +476,7 @@ type DashboardOptions struct { // A JSON object representing a DBSQL data source / SQL warehouse. type DataSource struct { - // The unique identifier for this data source / SQL warehouse. Can be used - // when creating / modifying queries and dashboards. + // Data source ID. Id string `json:"id,omitempty"` // The string name of this data source / SQL warehouse as it appears in the // Databricks SQL web application. @@ -543,7 +577,7 @@ type EditAlert struct { Name string `json:"name"` // Alert configuration options. Options AlertOptions `json:"options"` - // ID of the query evaluated by the alert. + // Query ID. 
QueryId string `json:"query_id"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered @@ -1562,13 +1596,12 @@ type Query struct { CanEdit bool `json:"can_edit,omitempty"` // The timestamp when this query was created. CreatedAt string `json:"created_at,omitempty"` - // Data Source ID. The UUID that uniquely identifies this data source / SQL - // warehouse across the API. + // Data source ID. DataSourceId string `json:"data_source_id,omitempty"` // General description that conveys additional information about this query // such as usage notes. Description string `json:"description,omitempty"` - + // Query ID. Id string `json:"id,omitempty"` // Indicates whether the query is trashed. Trashed queries can't be used in // dashboards, or appear in search results. If this boolean is `true`, the @@ -1600,8 +1633,7 @@ type Query struct { Name string `json:"name,omitempty"` Options *QueryOptions `json:"options,omitempty"` - // The identifier of the parent folder containing the query. Available for - // queries in workspace. + // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` // This describes an enum PermissionTier PermissionLevel `json:"permission_tier,omitempty"` @@ -1622,18 +1654,19 @@ type Query struct { } type QueryEditContent struct { - // The ID of the data source / SQL warehouse where this query will run. + // Data source ID. DataSourceId string `json:"data_source_id,omitempty"` - // General description that can convey additional information about this - // query such as usage notes. + // General description that conveys additional information about this query + // such as usage notes. Description string `json:"description,omitempty"` - // The name or title of this query to display in list views. + // The title of this query that appears in list views, widget headings, and + // on the query page. 
Name string `json:"name,omitempty"` // Exclusively used for storing a list parameter definitions. A parameter is // an object with `title`, `name`, `type`, and `value` properties. The // `value` field here is the default value. It can be overridden at runtime. Options any `json:"options,omitempty"` - // The text of the query. + // The text of the query to be run. Query string `json:"query,omitempty"` QueryId string `json:"-" url:"-"` @@ -1776,21 +1809,21 @@ type QueryOptions struct { } type QueryPostContent struct { - // The ID of the data source / SQL warehouse where this query will run. + // Data source ID. DataSourceId string `json:"data_source_id,omitempty"` - // General description that can convey additional information about this - // query such as usage notes. + // General description that conveys additional information about this query + // such as usage notes. Description string `json:"description,omitempty"` - // The name or title of this query to display in list views. + // The title of this query that appears in list views, widget headings, and + // on the query page. Name string `json:"name,omitempty"` // Exclusively used for storing a list parameter definitions. A parameter is // an object with `title`, `name`, `type`, and `value` properties. The // `value` field here is the default value. It can be overridden at runtime. Options any `json:"options,omitempty"` - // The identifier of the workspace folder containing the query. The default - // is the user's home folder. + // The identifier of the workspace folder containing the object. Parent string `json:"parent,omitempty"` - // The text of the query. + // The text of the query to be run. Query string `json:"query,omitempty"` } @@ -2620,13 +2653,8 @@ type User struct { Email string `json:"email,omitempty"` Id int `json:"id,omitempty"` - // Whether this user is an admin in the Databricks workspace. 
- IsDbAdmin bool `json:"is_db_admin,omitempty"` Name string `json:"name,omitempty"` - // The URL for the gravatar profile picture tied to this user's email - // address. - ProfileImageUrl string `json:"profile_image_url,omitempty"` } // The visualization description API changes frequently and is unsupported. You diff --git a/workspace_client.go b/workspace_client.go index e144e5f6f..7965688d0 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -49,6 +49,14 @@ type WorkspaceClient struct { // depending on privileges granted centrally in Unity Catalog. Catalogs *catalog.CatalogsAPI + // A clean room is a secure, privacy-protecting environment where two or + // more parties can share sensitive enterprise data, including customer + // data, for measurements, insights, activation and other use cases. + // + // To create clean rooms, you must be a metastore admin or a user with the + // **CREATE_CLEAN_ROOM** privilege. + CleanRooms *sharing.CleanRoomsAPI + // Cluster policy limits the ability to configure clusters based on a set of // rules. The policy rules limit the attributes or attribute values // available for cluster creation. Cluster policies have ACLs that limit @@ -790,6 +798,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { AccountAccessControlProxy: iam.NewAccountAccessControlProxy(apiClient), Alerts: sql.NewAlerts(apiClient), Catalogs: catalog.NewCatalogs(apiClient), + CleanRooms: sharing.NewCleanRooms(apiClient), ClusterPolicies: compute.NewClusterPolicies(apiClient), Clusters: compute.NewClusters(apiClient), CommandExecution: compute.NewCommandExecution(apiClient),