# Add: biglake_configuration to bigquery_table (GoogleCloudPlatform#1…
nevzheng authored and Philip Jonany committed Nov 4, 2024
1 parent cad0c25 commit 066860a
Showing 3 changed files with 125 additions and 0 deletions.
@@ -890,6 +890,53 @@ func ResourceBigQueryTable() *schema.Resource {
},
},

// BiglakeConfiguration [Optional] Specifies the configuration of a BigLake managed table.
"biglake_configuration": {
	Type:        schema.TypeList,
	Optional:    true,
	MaxItems:    1,
	ForceNew:    true,
	Description: "Specifies the configuration of a BigLake managed table.",
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			// ConnectionId: [Required] The connection specifying the credentials to be used to read
			// and write to external storage, such as Cloud Storage. The connection_id can have the
			// form "<project\_id>.<location\_id>.<connection\_id>" or
			// "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>".
			"connection_id": {
				Type:             schema.TypeString,
				Required:         true,
				DiffSuppressFunc: bigQueryTableConnectionIdSuppress,
				ForceNew:         true,
				Description:      `The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project\_id>.<location\_id>.<connection\_id>" or "projects/<project\_id>/locations/<location\_id>/connections/<connection\_id>".`,
			},
			// StorageUri: [Required] The fully qualified location prefix of the external folder where
			// table data is stored. The '*' wildcard character is not allowed.
			// The URI should be in the format "gs://bucket/path_to_table/".
			"storage_uri": {
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: `The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/".`,
			},
			// FileFormat: [Required] The file format the data is stored in.
			"file_format": {
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: "The file format the data is stored in.",
			},
			// TableFormat: [Required] The table format the metadata-only snapshots are stored in.
			"table_format": {
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: "The table format the metadata-only snapshots are stored in.",
			},
		},
	},
},

// FriendlyName: [Optional] A descriptive name for this table.
"friendly_name": {
	Type: schema.TypeString,
@@ -279,6 +279,68 @@ func TestAccBigQueryTable_AvroPartitioning(t *testing.T) {
})
}

func TestAccBigQueryBigLakeManagedTable(t *testing.T) {
	t.Parallel()
	bucketName := acctest.TestBucketName(t)
	connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))

	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))
	tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckBigQueryTableDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccBigLakeManagedTable(bucketName, connectionID, datasetID, tableID, TEST_SIMPLE_CSV_SCHEMA),
			},
		},
	})
}

func testAccBigLakeManagedTable(bucketName, connectionID, datasetID, tableID, schema string) string {
	return fmt.Sprintf(`
data "google_project" "project" {}
resource "google_storage_bucket" "test" {
  name                        = "%s"
  location                    = "US"
  force_destroy               = true
  uniform_bucket_level_access = true
}
resource "google_bigquery_connection" "test" {
  connection_id = "%s"
  location      = "US"
  cloud_resource {}
}
resource "google_project_iam_member" "test" {
  role    = "roles/storage.objectViewer"
  project = data.google_project.project.id
  member  = "serviceAccount:${google_bigquery_connection.test.cloud_resource[0].service_account_id}"
}
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
}
resource "google_bigquery_table" "test" {
  deletion_protection = false
  table_id            = "%s"
  dataset_id          = google_bigquery_dataset.test.dataset_id
  biglake_configuration {
    connection_id = google_bigquery_connection.test.name
    storage_uri   = "gs://${google_storage_bucket.test.name}/data/"
    file_format   = "PARQUET"
    table_format  = "ICEBERG"
  }
  schema = jsonencode(%s)
  depends_on = [
    google_project_iam_member.test
  ]
}
`, bucketName, connectionID, datasetID, tableID, schema)
}

func TestAccBigQueryExternalDataTable_json(t *testing.T) {
	t.Parallel()
	bucketName := acctest.TestBucketName(t)
@@ -104,6 +104,8 @@ The following arguments are supported:
  By defining these properties, the data source can then be queried as
  if it were a standard BigQuery table. Structure is [documented below](#nested_external_data_configuration).

* `biglake_configuration` - (Optional) Specifies the configuration of a BigLake managed table. Structure is [documented below](#nested_biglake_configuration).

* `friendly_name` - (Optional) A descriptive name for the table.

* `max_staleness`: (Optional) The maximum staleness of data that could be
@@ -492,6 +494,20 @@ The following arguments are supported:
* `replication_interval_ms` (Optional) - The interval at which the source
  materialized view is polled for updates. The default is 300000.

<a name="nested_biglake_configuration"></a>The `biglake_configuration` block supports:

* `connection_id` - (Required) The connection specifying the credentials to be used to
  read and write to external storage, such as Cloud Storage. The connection_id can
  have the form "&lt;project\_id&gt;.&lt;location\_id&gt;.&lt;connection\_id&gt;" or
  "projects/&lt;project\_id&gt;/locations/&lt;location\_id&gt;/connections/&lt;connection\_id&gt;".

* `storage_uri` - (Required) The fully qualified location prefix of the external folder where table data
  is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/".

* `file_format` - (Required) The file format the table data is stored in.

* `table_format` - (Required) The table format the metadata-only snapshots are stored in.
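To show how these arguments fit together, here is a minimal sketch of a BigLake managed table configuration. The resource names, bucket, and path are placeholders, and the `PARQUET`/`ICEBERG` values mirror the acceptance test above; as in that test, the connection's service account must also be granted access to the bucket (e.g. via `google_project_iam_member`).

```hcl
resource "google_bigquery_table" "example" {
  dataset_id          = google_bigquery_dataset.example.dataset_id
  table_id            = "example_table"
  deletion_protection = false

  biglake_configuration {
    # Accepts either "<project>.<location>.<connection>" or the full
    # "projects/<project>/locations/<location>/connections/<connection>" form.
    connection_id = google_bigquery_connection.example.name
    # Folder prefix in Cloud Storage; the '*' wildcard is not allowed.
    storage_uri   = "gs://example-bucket/example_table/"
    file_format   = "PARQUET"
    table_format  = "ICEBERG"
  }
}
```

Note that the block and each of its fields are `ForceNew` in the schema, so changing any of them destroys and recreates the table.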

## Attributes Reference

In addition to the arguments listed above, the following computed attributes are