diff --git a/docs/resources/table_constraint.md b/docs/resources/table_constraint.md index 879c1eb1ce..a7238ffd4b 100644 --- a/docs/resources/table_constraint.md +++ b/docs/resources/table_constraint.md @@ -148,6 +148,5 @@ Required: Import is supported using the following syntax: ```shell -# format is constraint name ❄ constraint type ❄ database name | schema name | table name terraform import snowflake_table_constraint.example 'myconstraintfk❄️FOREIGN KEY❄️databaseName|schemaName|tableName' ``` diff --git a/pkg/acceptance/bettertestspoc/README.md b/pkg/acceptance/bettertestspoc/README.md new file mode 100644 index 0000000000..68b698ea0c --- /dev/null +++ b/pkg/acceptance/bettertestspoc/README.md @@ -0,0 +1,470 @@ +# Better tests poc +This package contains a quick implementation of helpers that should allow us a quicker, more pleasant, and more readable implementation of tests, mainly the acceptance ones. +It contains the following packages: +- `assert` - all the assertions reside here. The currently supported assertions are: resource assertions, show output assertions, parameters assertions, and Snowflake object assertions. +The package contains utilities to build assertions for new objects. +All the assertions will be ultimately generated; the ones presented for warehouse were manually created. +- `config` - the new ResourceModel abstraction resides here. It provides models for objects and the builder methods allowing better config preparation in the acceptance tests. +It aims to be more readable than using `Config:` with hardcoded string or `ConfigFile:` for file that is not directly reachable from the test body. Also, it should be easier to reuse the models and prepare convenience extension methods. +All the models will be ultimately generated; the ones presented for warehouse were manually created. + +## Usage +You can check the current example usage in `TestAcc_Warehouse_BasicFlows` and the `create: complete` inside `TestInt_Warehouses`. 
To see the output after invalid assertions: +- add the following to the first step of `TestAcc_Warehouse_BasicFlows` +```go + // bad checks below + assert.WarehouseResource(t, "snowflake_warehouse.w"). + HasType(string(sdk.WarehouseTypeSnowparkOptimized)). + HasSize(string(sdk.WarehouseSizeMedium)), + assert.WarehouseShowOutput(t, "snowflake_warehouse.w"). + HasType(sdk.WarehouseTypeSnowparkOptimized), + assert.WarehouseParameters(t, "snowflake_warehouse.w"). + HasMaxConcurrencyLevel(16). + HasMaxConcurrencyLevelLevel(sdk.ParameterTypeWarehouse), + assert.Warehouse(t, warehouseId). + HasName("bad name"). + HasState(sdk.WarehouseStateSuspended). + HasType(sdk.WarehouseTypeSnowparkOptimized). + HasSize(sdk.WarehouseSizeMedium). + HasMaxClusterCount(12). + HasMinClusterCount(13). + HasScalingPolicy(sdk.ScalingPolicyEconomy). + HasAutoSuspend(123). + HasAutoResume(false). + HasResourceMonitor(sdk.NewAccountObjectIdentifier("some-id")). + HasComment("bad comment"). + HasEnableQueryAcceleration(true). 
+ HasQueryAccelerationMaxScaleFactor(12), + assert.Check(resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeSnowparkOptimized))), +``` +it will result in: +``` + warehouse_acceptance_test.go:46: Step 1/8 error: Check failed: check 6/10 error: + snowflake_warehouse.w resource assertion [1/2]: failed with error: Attribute 'warehouse_type' not found + snowflake_warehouse.w resource assertion [2/2]: failed with error: Attribute 'warehouse_size' not found + check 7/10 error: + snowflake_warehouse.w show_output assertion [2/2]: failed with error: Attribute 'show_output.0.type' expected "SNOWPARK-OPTIMIZED", got "STANDARD" + check 8/10 error: + snowflake_warehouse.w parameters assertion [2/3]: failed with error: Attribute 'parameters.0.max_concurrency_level.0.value' expected "16", got "8" + snowflake_warehouse.w parameters assertion [3/3]: failed with error: Attribute 'parameters.0.max_concurrency_level.0.level' expected "WAREHOUSE", got "" + check 9/10 error: + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [1/13]: failed with error: expected name: bad name; got: URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7 + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [2/13]: failed with error: expected state: SUSPENDED; got: STARTED + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [3/13]: failed with error: expected type: SNOWPARK-OPTIMIZED; got: STANDARD + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [4/13]: failed with error: expected size: MEDIUM; got: XSMALL + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [5/13]: failed with error: expected max cluster count: 12; got: 1 + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [6/13]: failed with error: expected min cluster count: 13; got: 1 + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] 
assertion [7/13]: failed with error: expected type: ECONOMY; got: STANDARD + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [8/13]: failed with error: expected auto suspend: 123; got: 600 + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [9/13]: failed with error: expected auto resume: false; got: true + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [10/13]: failed with error: expected resource monitor: some-id; got: + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [11/13]: failed with error: expected comment: bad comment; got: From furthermore rarely cast anything those you could also whoever. + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [12/13]: failed with error: expected enable query acceleration: true; got: false + object WAREHOUSE["URVBDDAT_E7589B32_6534_1F93_DC1B_9E94FB8D27D7"] assertion [13/13]: failed with error: expected query acceleration max scale factor: 12; got: 8 + check 10/10 error: + snowflake_warehouse.w: Attribute 'warehouse_type' not found +``` + +- add the following to the second step of `TestAcc_Warehouse_BasicFlows` +```go + // bad checks below + assert.CheckImport(importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "bad name", name)), + assert.ImportedWarehouseResource(t, warehouseId.Name()). + HasName("bad name"). + HasType(string(sdk.WarehouseTypeSnowparkOptimized)). + HasSize(string(sdk.WarehouseSizeMedium)). + HasMaxClusterCount("2"). + HasMinClusterCount("3"). + HasScalingPolicy(string(sdk.ScalingPolicyEconomy)). + HasAutoSuspend("123"). + HasAutoResume("false"). + HasResourceMonitor("abc"). + HasComment("bad comment"). + HasEnableQueryAcceleration("true"). + HasQueryAccelerationMaxScaleFactor("16"), + assert.ImportedWarehouseParameters(t, warehouseId.Name()). + HasMaxConcurrencyLevel(1). + HasMaxConcurrencyLevelLevel(sdk.ParameterTypeWarehouse). 
+ HasStatementQueuedTimeoutInSeconds(23). + HasStatementQueuedTimeoutInSecondsLevel(sdk.ParameterTypeWarehouse). + HasStatementTimeoutInSeconds(1232). + HasStatementTimeoutInSecondsLevel(sdk.ParameterTypeWarehouse), + assert.Warehouse(t, warehouseId). + HasName("bad name"). + HasState(sdk.WarehouseStateSuspended). + HasType(sdk.WarehouseTypeSnowparkOptimized). + HasSize(sdk.WarehouseSizeMedium). + HasMaxClusterCount(12). + HasMinClusterCount(13). + HasScalingPolicy(sdk.ScalingPolicyEconomy). + HasAutoSuspend(123). + HasAutoResume(false). + HasResourceMonitor(sdk.NewAccountObjectIdentifier("some-id")). + HasComment("bad comment"). + HasEnableQueryAcceleration(true). + HasQueryAccelerationMaxScaleFactor(12), +``` +it will result in: +``` + warehouse_acceptance_test.go:46: check 6/9 error: + attribute bad name not found in instance state + check 7/9 error: + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [1/12]: failed with error: expected: bad name, got: RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [2/12]: failed with error: expected: SNOWPARK-OPTIMIZED, got: STANDARD + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [3/12]: failed with error: expected: MEDIUM, got: XSMALL + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [4/12]: failed with error: expected: 2, got: 1 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [5/12]: failed with error: expected: 3, got: 1 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [6/12]: failed with error: expected: ECONOMY, got: STANDARD + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [7/12]: failed with error: expected: 123, got: 600 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [8/12]: failed with error: expected: false, got: true + 
RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [9/12]: failed with error: expected: abc, got: + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [10/12]: failed with error: expected: bad comment, got: School huh one here entirely mustering where crew though wealth. + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [11/12]: failed with error: expected: true, got: false + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported resource assertion [12/12]: failed with error: expected: 16, got: 8 + check 8/9 error: + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [2/7]: failed with error: expected: 1, got: 8 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [3/7]: failed with error: expected: WAREHOUSE, got: + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [4/7]: failed with error: expected: 23, got: 0 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [5/7]: failed with error: expected: WAREHOUSE, got: + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [6/7]: failed with error: expected: 1232, got: 172800 + RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 imported parameters assertion [7/7]: failed with error: expected: WAREHOUSE, got: + check 9/9 error: + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [1/13]: failed with error: expected name: bad name; got: RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844 + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [2/13]: failed with error: expected state: SUSPENDED; got: STARTED + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [3/13]: failed with error: expected type: SNOWPARK-OPTIMIZED; got: STANDARD + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [4/13]: failed with error: expected size: MEDIUM; got: 
XSMALL + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [5/13]: failed with error: expected max cluster count: 12; got: 1 + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [6/13]: failed with error: expected min cluster count: 13; got: 1 + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [7/13]: failed with error: expected type: ECONOMY; got: STANDARD + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [8/13]: failed with error: expected auto suspend: 123; got: 600 + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [9/13]: failed with error: expected auto resume: false; got: true + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [10/13]: failed with error: expected resource monitor: some-id; got: + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [11/13]: failed with error: expected comment: bad comment; got: School huh one here entirely mustering where crew though wealth. + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [12/13]: failed with error: expected enable query acceleration: true; got: false + object WAREHOUSE["RQYLJJAT_04646516_1F33_50E9_CC19_D6B14E374844"] assertion [13/13]: failed with error: expected query acceleration max scale factor: 12; got: 8 +``` + +- add the following to the `create: complete` in `TestInt_Warehouses`: +```go + // to show errors + warehouseAssertionsBad := objectAssert.Warehouse(t, id). + HasName("bad name"). + HasState(sdk.WarehouseStateSuspended). + HasType(sdk.WarehouseTypeSnowparkOptimized). + HasSize(sdk.WarehouseSizeMedium). + HasMaxClusterCount(12). + HasMinClusterCount(13). + HasScalingPolicy(sdk.ScalingPolicyStandard). + HasAutoSuspend(123). + HasAutoResume(false). + HasResourceMonitor(sdk.NewAccountObjectIdentifier("some-id")). + HasComment("bad comment"). + HasEnableQueryAcceleration(false). 
+ HasQueryAccelerationMaxScaleFactor(12) + objectAssert.AssertThatObject(t, warehouseAssertionsBad) +``` +it will result in: +``` + commons.go:101: + Error Trace: /Users/asawicki/Projects/terraform-provider-snowflake/pkg/sdk/testint/warehouses_integration_test.go:149 + Error: Received unexpected error: + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [1/13]: failed with error: expected name: bad name; got: VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [2/13]: failed with error: expected state: SUSPENDED; got: STARTED + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [3/13]: failed with error: expected type: SNOWPARK-OPTIMIZED; got: STANDARD + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [4/13]: failed with error: expected size: MEDIUM; got: SMALL + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [5/13]: failed with error: expected max cluster count: 12; got: 8 + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [6/13]: failed with error: expected min cluster count: 13; got: 2 + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [7/13]: failed with error: expected type: STANDARD; got: ECONOMY + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [8/13]: failed with error: expected auto suspend: 123; got: 1000 + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [9/13]: failed with error: expected auto resume: false; got: true + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [10/13]: failed with error: expected resource monitor: some-id; got: OOUJMDIT_535F314F_6549_348F_370E_AB430EE4BC7B + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [11/13]: failed with error: expected comment: bad comment; got: comment + object 
WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [12/13]: failed with error: expected enable query acceleration: false; got: true + object WAREHOUSE["VKSENEIT_535F314F_6549_348F_370E_AB430EE4BC7B"] assertion [13/13]: failed with error: expected query acceleration max scale factor: 12; got: 90 + Test: TestInt_Warehouses/create:_complete +``` + +## Adding new resource assertions +For object `abc` create the following files with the described content in the `assert` package: +- `abc_resource.go` +```go +type AbcResourceAssert struct { + *ResourceAssert +} + +func AbcResource(t *testing.T, name string) *AbcResourceAssert { + t.Helper() + + return &AbcResourceAssert{ + ResourceAssert: NewResourceAssert(name, "resource"), + } +} + +func ImportedAbcResource(t *testing.T, id string) *AbcResourceAssert { + t.Helper() + + return &AbcResourceAssert{ + ResourceAssert: NewImportedResourceAssert(id, "imported resource"), + } +} +``` +Two methods for each parameter (let's say parameter name is xyz): +```go +func (w *AbcResourceAssert) HasXyz(expected string) *AbcResourceAssert { + w.assertions = append(w.assertions, valueSet("xyz", expected)) + return w +} + +func (w *AbcResourceAssert) HasNoXyz() *AbcResourceAssert { + w.assertions = append(w.assertions, valueNotSet("xyz")) + return w +} +``` + +- `abc_show_output.go` +```go +type AbcShowOutputAssert struct { + *ResourceAssert +} + +func AbcShowOutput(t *testing.T, name string) *AbcShowOutputAssert { + t.Helper() + w := AbcShowOutputAssert{ + NewResourceAssert(name, "show_output"), + } + w.assertions = append(w.assertions, valueSet("show_output.#", "1")) + return &w +} + +func ImportedAbcShowOutput(t *testing.T, id string) *AbcShowOutputAssert { + t.Helper() + w := AbcShowOutputAssert{ + NewImportedResourceAssert(id, "show_output"), + } + w.assertions = append(w.assertions, valueSet("show_output.#", "1")) + return &w +} +``` + +A method for each parameter (let's say parameter name is xyz): +```go +func (w 
*AbcShowOutputAssert) HasXyz(expected string) *AbcShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("xyz", string(expected))) + return w +} +``` + +- `abc_parameters.go` +```go +type AbcParametersAssert struct { + *ResourceAssert +} + +func AbcParameters(t *testing.T, name string) *AbcParametersAssert { + t.Helper() + w := AbcParametersAssert{ + NewResourceAssert(name, "parameters"), + } + w.assertions = append(w.assertions, valueSet("parameters.#", "1")) + return &w +} + +func ImportedAbcParameters(t *testing.T, id string) *AbcParametersAssert { + t.Helper() + w := AbcParametersAssert{ + NewImportedResourceAssert(id, "imported parameters"), + } + w.assertions = append(w.assertions, valueSet("parameters.#", "1")) + return &w +} +``` +Two methods for each parameter (let's say parameter name is xyz): +```go +func (w *AbcParametersAssert) HasXyz(expected int) *AbcParametersAssert { + w.assertions = append(w.assertions, parameterValueSet("xyz", strconv.Itoa(expected))) + return w +} + +func (w *AbcParametersAssert) HasXyzLevel(expected sdk.ParameterType) *AbcParametersAssert { + w.assertions = append(w.assertions, parameterLevelSet("xyz", string(expected))) + return w +} +``` +- extensions should be put in `abc_resource_ext.go`, `abc_show_output_ext.go`, or `abc_parameters_ext.go`. We can put here the named aggregations of other assertions. It allows us extendability. Later, we may choose to generate some of these methods too. Currently, the split will help when we start the generation of aforementioned methods. 
Current examples for extension could be: +```go +func (w *WarehouseResourceAssert) HasDefaultMaxConcurrencyLevel() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("max_concurrency_level", "8")) + return w +} + +func (w *WarehouseResourceAssert) HasDefaultStatementQueuedTimeoutInSeconds() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("statement_queued_timeout_in_seconds", "0")) + return w +} + +func (w *WarehouseResourceAssert) HasDefaultStatementTimeoutInSeconds() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("statement_timeout_in_seconds", "172800")) + return w +} + +func (w *WarehouseResourceAssert) HasAllDefault() *WarehouseResourceAssert { + return w.HasDefaultMaxConcurrencyLevel(). + HasNoType(). + HasNoSize(). + HasNoMaxClusterCount(). + HasNoMinClusterCount(). + HasNoScalingPolicy(). + HasAutoSuspend(r.IntDefaultString). + HasAutoResume(r.BooleanDefault). + HasNoInitiallySuspended(). + HasNoResourceMonitor(). + HasEnableQueryAcceleration(r.BooleanDefault). + HasQueryAccelerationMaxScaleFactor(r.IntDefaultString). + HasDefaultMaxConcurrencyLevel(). + HasDefaultStatementQueuedTimeoutInSeconds(). + HasDefaultStatementTimeoutInSeconds() +} + +func (w *WarehouseParametersAssert) HasDefaultMaxConcurrencyLevel() *WarehouseParametersAssert { + return w. + HasMaxConcurrencyLevel(8). + HasMaxConcurrencyLevelLevel("") +} + +func (w *WarehouseParametersAssert) HasDefaultStatementQueuedTimeoutInSeconds() *WarehouseParametersAssert { + return w. + HasStatementQueuedTimeoutInSeconds(0). + HasStatementQueuedTimeoutInSecondsLevel("") +} + +func (w *WarehouseParametersAssert) HasDefaultStatementTimeoutInSeconds() *WarehouseParametersAssert { + return w. + HasStatementTimeoutInSeconds(172800). 
+ HasStatementTimeoutInSecondsLevel("") +} +``` + +## Adding new Snowflake object assertions +For object `abc` create the following files with the described content in the `assert` package: +- `abc_snowflake.go` +```go +type AbcAssert struct { + *SnowflakeObjectAssert[sdk.Abc, sdk.AccountObjectIdentifier] +} + +func Abc(t *testing.T, id sdk.AccountObjectIdentifier) *AbcAssert { + t.Helper() + return &AbcAssert{ + NewSnowflakeObjectAssertWithProvider(sdk.ObjectTypeAbc, id, acc.TestClient().Abc.Show), + } +} + +func AbcFromObject(t *testing.T, abc *sdk.Abc) *AbcAssert { + t.Helper() + return &AbcAssert{ + NewSnowflakeObjectAssertWithObject(sdk.ObjectTypeAbc, abc.ID(), abc), + } +} +``` + +A method for each object parameter (let's say parameter name is xyz): +```go +func (w *AbcAssert) HasXyz(expected string) *AbcAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Abc) error { + t.Helper() + if o.Xyz != expected { + return fmt.Errorf("expected xyz: %v; got: %v", expected, o.Xyz) + } + return nil + }) + return w +} +``` + +- `abc_snowflake_ext.go` - for the easier separation later (when we start generating the common checks for each object). 
Example would be: +```go +func (w *WarehouseAssert) HasStateOneOf(expected ...sdk.WarehouseState) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if !slices.Contains(expected, o.State) { + return fmt.Errorf("expected state one of: %v; got: %v", expected, string(o.State)) + } + return nil + }) + return w +} +``` + +## Adding new models +For object `abc` create the following files with the described content in the `config` package: +- `abc_model.go` +```go +type AbcModel struct { + Xyz config.Variable `json:"xyz,omitempty"` + *resourceModelMeta +} +``` +two builders with required params only: +```go +func NewAbcModel( + resourceName string, + xyz string, +) *AbcModel { + m := &AbcModel{resourceModelMeta: meta(resourceName, resources.Abc)} + m.WithXyz(xyz) + return m +} + +func NewDefaultAbcModel( + xyz string, +) *AbcModel { + m := &AbcModel{resourceModelMeta: defaultMeta(resources.Abc)} + m.WithXyz(xyz) + return m +} +``` +Two methods for each param (with good value type and with any value type): +```go +func (m *AbcModel) WithXyz(xyz string) *AbcModel { + m.Xyz = config.StringVariable(xyz) + return m +} + +func (m *AbcModel) WithXyzValue(value config.Variable) *AbcModel { + m.Xyz = value + return m +} +``` + +- `abc_model_ext.go` - for the easier separation later (when we start generating the models for each object). Example would be: +```go +func BasicWarehouseModel( + name string, + comment string, +) *WarehouseModel { + return NewDefaultWarehouseModel(name).WithComment(comment) +} +``` + +## Known limitations/planned improvements +- Generate all assertions and models. +- Test all the utilities for assertion/model construction (public interfaces, methods, functions). +- Verify if all the config types are supported. +- Consider a better implementation for the model conversion to config (TODO left). +- Support additional methods for references in models (TODO left). 
+- Support depends_on in models (TODO left). +- Add a convenience function to concatenate multiple models (TODO left). +- Add function to support using `ConfigFile:` in the acceptance tests. +- Replace `acceptance/snowflakechecks` with the new proposed Snowflake objects assertions. +- Support `showOutputValueUnset` and add a second function for each `show_output` attribute. +- Support `resourceAssertionTypeValueNotSet` for import checks (`panic` left currently). +- Add assertions for the `describe_output`. +- Add support for datasource tests (assertions and config builders). +- Consider overriding the assertions when invoking same check multiple times with different params (e.g. `Warehouse(...).HasType(X).HasType(Y)`; it could use the last-check-wins approach, to more easily reuse complex checks between the test steps). +- Consider not adding the check for `show_output` presence on creation (same with `parameters`). The majority of the use cases need it to be present but there are a few others (like conditional presence in the datasources). Currently, it seems that they should be always present in the resources, so no change is made. Later, with adding the support for the datasource tests, consider simple destructive implementation like: +```go +func (w *WarehouseDatasourceShowOutputAssert) IsEmpty() { + w.assertions = make([]resourceAssertion, 0) + w.assertions = append(w.assertions, valueSet("show_output.#", "0")) +} +``` diff --git a/pkg/acceptance/bettertestspoc/assert/commons.go b/pkg/acceptance/bettertestspoc/assert/commons.go new file mode 100644 index 0000000000..59e1c86ffa --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/commons.go @@ -0,0 +1,103 @@ +package assert + +import ( + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +// TestCheckFuncProvider is an interface with just one method providing resource.TestCheckFunc. 
+// It allows using it as the input for "Check:" in resource.TestStep. +// It should be used with AssertThat. +type TestCheckFuncProvider interface { + ToTerraformTestCheckFunc(t *testing.T) resource.TestCheckFunc +} + +// AssertThat should be used for "Check:" input in resource.TestStep instead of e.g. resource.ComposeTestCheckFunc. +// It allows performing all the checks implementing the TestCheckFuncProvider interface. +func AssertThat(t *testing.T, fs ...TestCheckFuncProvider) resource.TestCheckFunc { + t.Helper() + return func(s *terraform.State) error { + var result []error + + for i, f := range fs { + if err := f.ToTerraformTestCheckFunc(t)(s); err != nil { + result = append(result, fmt.Errorf("check %d/%d error:\n%w", i+1, len(fs), err)) + } + } + + return errors.Join(result...) + } +} + +var _ TestCheckFuncProvider = (*testCheckFuncWrapper)(nil) + +type testCheckFuncWrapper struct { + f resource.TestCheckFunc +} + +func (w *testCheckFuncWrapper) ToTerraformTestCheckFunc(_ *testing.T) resource.TestCheckFunc { + return w.f +} + +// Check allows using the basic terraform checks while using AssertThat. +// To use it, simply wrap the check in Check. +func Check(f resource.TestCheckFunc) TestCheckFuncProvider { + return &testCheckFuncWrapper{f} +} + +// ImportStateCheckFuncProvider is an interface with just one method providing resource.ImportStateCheckFunc. +// It allows using it as the input for "ImportStateCheck:" in resource.TestStep for import tests. +// It should be used with AssertThatImport. +type ImportStateCheckFuncProvider interface { + ToTerraformImportStateCheckFunc(t *testing.T) resource.ImportStateCheckFunc +} + +// AssertThatImport should be used for "ImportStateCheck:" input in resource.TestStep instead of e.g. importchecks.ComposeImportStateCheck. +// It allows performing all the checks implementing the ImportStateCheckFuncProvider interface.
+func AssertThatImport(t *testing.T, fs ...ImportStateCheckFuncProvider) resource.ImportStateCheckFunc { + t.Helper() + return func(s []*terraform.InstanceState) error { + var result []error + + for i, f := range fs { + if err := f.ToTerraformImportStateCheckFunc(t)(s); err != nil { + result = append(result, fmt.Errorf("check %d/%d error:\n%w", i+1, len(fs), err)) + } + } + + return errors.Join(result...) + } +} + +var _ ImportStateCheckFuncProvider = (*importStateCheckFuncWrapper)(nil) + +type importStateCheckFuncWrapper struct { + f resource.ImportStateCheckFunc +} + +func (w *importStateCheckFuncWrapper) ToTerraformImportStateCheckFunc(_ *testing.T) resource.ImportStateCheckFunc { + return w.f +} + +// CheckImport allows using the basic terraform import checks while using AssertThatImport. +// To use it, simply wrap the check in CheckImport. +func CheckImport(f resource.ImportStateCheckFunc) ImportStateCheckFuncProvider { + return &importStateCheckFuncWrapper{f} +} + +// InPlaceAssertionVerifier is an interface providing a method that verifies all the prepared assertions in place. +// It does not return a function like TestCheckFuncProvider or ImportStateCheckFuncProvider; it runs all the assertions in place instead. +type InPlaceAssertionVerifier interface { + VerifyAll(t *testing.T) +} + +// AssertThatObject should be used in the SDK tests for created object validation. +// It verifies all the prepared assertions in place.
+func AssertThatObject(t *testing.T, objectAssert InPlaceAssertionVerifier) { + t.Helper() + objectAssert.VerifyAll(t) +} diff --git a/pkg/acceptance/bettertestspoc/assert/resource_assertions.go b/pkg/acceptance/bettertestspoc/assert/resource_assertions.go new file mode 100644 index 0000000000..99c4b203bb --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resource_assertions.go @@ -0,0 +1,133 @@ +package assert + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/importchecks" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +var ( + _ TestCheckFuncProvider = (*ResourceAssert)(nil) + _ ImportStateCheckFuncProvider = (*ResourceAssert)(nil) +) + +// ResourceAssert is an embeddable struct that should be used to construct new resource assertions (for resource, show output, parameters, etc.). +// It implements both TestCheckFuncProvider and ImportStateCheckFuncProvider which makes it easy to create new resource assertions. +type ResourceAssert struct { + name string + id string + prefix string + assertions []resourceAssertion +} + +// NewResourceAssert creates a ResourceAssert where the resource name should be used as a key for assertions. +func NewResourceAssert(name string, prefix string) *ResourceAssert { + return &ResourceAssert{ + name: name, + prefix: prefix, + assertions: make([]resourceAssertion, 0), + } +} + +// NewImportedResourceAssert creates a ResourceAssert where the resource id should be used as a key for assertions. 
+func NewImportedResourceAssert(id string, prefix string) *ResourceAssert { + return &ResourceAssert{ + id: id, + prefix: prefix, + assertions: make([]resourceAssertion, 0), + } +} + +type resourceAssertionType string + +const ( + resourceAssertionTypeValueSet = "VALUE_SET" + resourceAssertionTypeValueNotSet = "VALUE_NOT_SET" +) + +type resourceAssertion struct { + fieldName string + expectedValue string + resourceAssertionType resourceAssertionType +} + +func valueSet(fieldName string, expected string) resourceAssertion { + return resourceAssertion{fieldName: fieldName, expectedValue: expected, resourceAssertionType: resourceAssertionTypeValueSet} +} + +func valueNotSet(fieldName string) resourceAssertion { + return resourceAssertion{fieldName: fieldName, resourceAssertionType: resourceAssertionTypeValueNotSet} +} + +const showOutputPrefix = "show_output.0." + +func showOutputValueSet(fieldName string, expected string) resourceAssertion { + return resourceAssertion{fieldName: showOutputPrefix + fieldName, expectedValue: expected, resourceAssertionType: resourceAssertionTypeValueSet} +} + +const ( + parametersPrefix = "parameters.0." + parametersValueSuffix = ".0.value" + parametersLevelSuffix = ".0.level" +) + +func parameterValueSet(fieldName string, expected string) resourceAssertion { + return resourceAssertion{fieldName: parametersPrefix + fieldName + parametersValueSuffix, expectedValue: expected, resourceAssertionType: resourceAssertionTypeValueSet} +} + +func parameterLevelSet(fieldName string, expected string) resourceAssertion { + return resourceAssertion{fieldName: parametersPrefix + fieldName + parametersLevelSuffix, expectedValue: expected, resourceAssertionType: resourceAssertionTypeValueSet} +} + +// ToTerraformTestCheckFunc implements TestCheckFuncProvider to allow easier creation of new resource assertions. +// It goes through all the assertion accumulated earlier and gathers the results of the checks. 
+func (r *ResourceAssert) ToTerraformTestCheckFunc(t *testing.T) resource.TestCheckFunc { + t.Helper() + return func(s *terraform.State) error { + var result []error + + for i, a := range r.assertions { + switch a.resourceAssertionType { + case resourceAssertionTypeValueSet: + if err := resource.TestCheckResourceAttr(r.name, a.fieldName, a.expectedValue)(s); err != nil { + errCut, _ := strings.CutPrefix(err.Error(), fmt.Sprintf("%s: ", r.name)) + result = append(result, fmt.Errorf("%s %s assertion [%d/%d]: failed with error: %s", r.name, r.prefix, i+1, len(r.assertions), errCut)) + } + case resourceAssertionTypeValueNotSet: + if err := resource.TestCheckNoResourceAttr(r.name, a.fieldName)(s); err != nil { + errCut, _ := strings.CutPrefix(err.Error(), fmt.Sprintf("%s: ", r.name)) + result = append(result, fmt.Errorf("%s %s assertion [%d/%d]: failed with error: %s", r.name, r.prefix, i+1, len(r.assertions), errCut)) + } + } + } + + return errors.Join(result...) + } +} + +// ToTerraformImportStateCheckFunc implements ImportStateCheckFuncProvider to allow easier creation of new resource assertions. +// It goes through all the assertion accumulated earlier and gathers the results of the checks. +func (r *ResourceAssert) ToTerraformImportStateCheckFunc(t *testing.T) resource.ImportStateCheckFunc { + t.Helper() + return func(s []*terraform.InstanceState) error { + var result []error + + for i, a := range r.assertions { + switch a.resourceAssertionType { + case resourceAssertionTypeValueSet: + if err := importchecks.TestCheckResourceAttrInstanceState(r.id, a.fieldName, a.expectedValue)(s); err != nil { + result = append(result, fmt.Errorf("%s %s assertion [%d/%d]: failed with error: %w", r.id, r.prefix, i+1, len(r.assertions), err)) + } + case resourceAssertionTypeValueNotSet: + panic("implement") + } + } + + return errors.Join(result...) 
+ } +} diff --git a/pkg/acceptance/bettertestspoc/assert/snowflake_assertions.go b/pkg/acceptance/bettertestspoc/assert/snowflake_assertions.go new file mode 100644 index 0000000000..ec6d9b234a --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/snowflake_assertions.go @@ -0,0 +1,103 @@ +package assert + +import ( + "errors" + "fmt" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/require" +) + +type ( + assertSdk[T any] func(*testing.T, T) error + objectProvider[T any, I sdk.ObjectIdentifier] func(*testing.T, I) (*T, error) +) + +// SnowflakeObjectAssert is an embeddable struct that should be used to construct new Snowflake object assertions. +// It implements both TestCheckFuncProvider and ImportStateCheckFuncProvider which makes it easy to create new resource assertions. +type SnowflakeObjectAssert[T any, I sdk.ObjectIdentifier] struct { + assertions []assertSdk[*T] + id I + objectType sdk.ObjectType + object *T + provider objectProvider[T, I] +} + +// NewSnowflakeObjectAssertWithProvider creates a SnowflakeObjectAssert with id and the provider. +// Object to check is lazily fetched from Snowflake when the checks are being run. +func NewSnowflakeObjectAssertWithProvider[T any, I sdk.ObjectIdentifier](objectType sdk.ObjectType, id I, provider objectProvider[T, I]) *SnowflakeObjectAssert[T, I] { + return &SnowflakeObjectAssert[T, I]{ + assertions: make([]assertSdk[*T], 0), + id: id, + objectType: objectType, + provider: provider, + } +} + +// NewSnowflakeObjectAssertWithObject creates a SnowflakeObjectAssert with object that was already fetched from Snowflake. +// All the checks are run against the given object. 
+func NewSnowflakeObjectAssertWithObject[T any, I sdk.ObjectIdentifier](objectType sdk.ObjectType, id I, object *T) *SnowflakeObjectAssert[T, I] { + return &SnowflakeObjectAssert[T, I]{ + assertions: make([]assertSdk[*T], 0), + id: id, + objectType: objectType, + object: object, + } +} + +// ToTerraformTestCheckFunc implements TestCheckFuncProvider to allow easier creation of new Snowflake object assertions. +// It goes through all the assertion accumulated earlier and gathers the results of the checks. +func (s *SnowflakeObjectAssert[_, _]) ToTerraformTestCheckFunc(t *testing.T) resource.TestCheckFunc { + t.Helper() + return func(_ *terraform.State) error { + return s.runSnowflakeObjectsAssertions(t) + } +} + +// ToTerraformImportStateCheckFunc implements ImportStateCheckFuncProvider to allow easier creation of new Snowflake object assertions. +// It goes through all the assertion accumulated earlier and gathers the results of the checks. +func (s *SnowflakeObjectAssert[_, _]) ToTerraformImportStateCheckFunc(t *testing.T) resource.ImportStateCheckFunc { + t.Helper() + return func(_ []*terraform.InstanceState) error { + return s.runSnowflakeObjectsAssertions(t) + } +} + +// VerifyAll implements InPlaceAssertionVerifier to allow easier creation of new Snowflake object assertions. +// It verifies all the assertions accumulated earlier and gathers the results of the checks. 
+func (s *SnowflakeObjectAssert[_, _]) VerifyAll(t *testing.T) { + t.Helper() + err := s.runSnowflakeObjectsAssertions(t) + require.NoError(t, err) +} + +func (s *SnowflakeObjectAssert[T, _]) runSnowflakeObjectsAssertions(t *testing.T) error { + t.Helper() + + var sdkObject *T + var err error + switch { + case s.object != nil: + sdkObject = s.object + case s.provider != nil: + sdkObject, err = s.provider(t, s.id) + if err != nil { + return err + } + default: + return fmt.Errorf("cannot proceed with object %s[%s] assertion: object or provider must be specified", s.objectType, s.id.FullyQualifiedName()) + } + + var result []error + + for i, assertion := range s.assertions { + if err = assertion(t, sdkObject); err != nil { + result = append(result, fmt.Errorf("object %s[%s] assertion [%d/%d]: failed with error: %w", s.objectType, s.id.FullyQualifiedName(), i+1, len(s.assertions), err)) + } + } + + return errors.Join(result...) +} diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_parameters.go b/pkg/acceptance/bettertestspoc/assert/warehouse_parameters.go new file mode 100644 index 0000000000..169ea17955 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/warehouse_parameters.go @@ -0,0 +1,60 @@ +package assert + +import ( + "strconv" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +type WarehouseParametersAssert struct { + *ResourceAssert +} + +func WarehouseParameters(t *testing.T, name string) *WarehouseParametersAssert { + t.Helper() + w := WarehouseParametersAssert{ + NewResourceAssert(name, "parameters"), + } + w.assertions = append(w.assertions, valueSet("parameters.#", "1")) + return &w +} + +func ImportedWarehouseParameters(t *testing.T, id string) *WarehouseParametersAssert { + t.Helper() + w := WarehouseParametersAssert{ + NewImportedResourceAssert(id, "imported parameters"), + } + w.assertions = append(w.assertions, valueSet("parameters.#", "1")) + return &w +} + +func (w *WarehouseParametersAssert) 
HasMaxConcurrencyLevel(expected int) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterValueSet("max_concurrency_level", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseParametersAssert) HasStatementQueuedTimeoutInSeconds(expected int) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterValueSet("statement_queued_timeout_in_seconds", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseParametersAssert) HasStatementTimeoutInSeconds(expected int) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterValueSet("statement_timeout_in_seconds", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseParametersAssert) HasMaxConcurrencyLevelLevel(expected sdk.ParameterType) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterLevelSet("max_concurrency_level", string(expected))) + return w +} + +func (w *WarehouseParametersAssert) HasStatementQueuedTimeoutInSecondsLevel(expected sdk.ParameterType) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterLevelSet("statement_queued_timeout_in_seconds", string(expected))) + return w +} + +func (w *WarehouseParametersAssert) HasStatementTimeoutInSecondsLevel(expected sdk.ParameterType) *WarehouseParametersAssert { + w.assertions = append(w.assertions, parameterLevelSet("statement_timeout_in_seconds", string(expected))) + return w +} diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_parameters_ext.go b/pkg/acceptance/bettertestspoc/assert/warehouse_parameters_ext.go new file mode 100644 index 0000000000..4e3a3ad231 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/warehouse_parameters_ext.go @@ -0,0 +1,19 @@ +package assert + +func (w *WarehouseParametersAssert) HasDefaultMaxConcurrencyLevel() *WarehouseParametersAssert { + return w. + HasMaxConcurrencyLevel(8). 
+ HasMaxConcurrencyLevelLevel("") +} + +func (w *WarehouseParametersAssert) HasDefaultStatementQueuedTimeoutInSeconds() *WarehouseParametersAssert { + return w. + HasStatementQueuedTimeoutInSeconds(0). + HasStatementQueuedTimeoutInSecondsLevel("") +} + +func (w *WarehouseParametersAssert) HasDefaultStatementTimeoutInSeconds() *WarehouseParametersAssert { + return w. + HasStatementTimeoutInSeconds(172800). + HasStatementTimeoutInSecondsLevel("") +} diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_resource.go b/pkg/acceptance/bettertestspoc/assert/warehouse_resource.go new file mode 100644 index 0000000000..474f59a387 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/warehouse_resource.go @@ -0,0 +1,185 @@ +package assert + +import ( + "testing" +) + +type WarehouseResourceAssert struct { + *ResourceAssert +} + +func WarehouseResource(t *testing.T, name string) *WarehouseResourceAssert { + t.Helper() + + return &WarehouseResourceAssert{ + ResourceAssert: NewResourceAssert(name, "resource"), + } +} + +func ImportedWarehouseResource(t *testing.T, id string) *WarehouseResourceAssert { + t.Helper() + + return &WarehouseResourceAssert{ + ResourceAssert: NewImportedResourceAssert(id, "imported resource"), + } +} + +func (w *WarehouseResourceAssert) HasName(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("name", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasType(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("warehouse_type", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasSize(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("warehouse_size", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasMaxClusterCount(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("max_cluster_count", expected)) + return w +} + +func (w *WarehouseResourceAssert) 
HasMinClusterCount(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("min_cluster_count", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasScalingPolicy(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("scaling_policy", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasAutoSuspend(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("auto_suspend", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasAutoResume(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("auto_resume", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasInitiallySuspended(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("initially_suspended", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasResourceMonitor(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("resource_monitor", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasComment(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("comment", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasEnableQueryAcceleration(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("enable_query_acceleration", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasQueryAccelerationMaxScaleFactor(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("query_acceleration_max_scale_factor", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasMaxConcurrencyLevel(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("max_concurrency_level", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasStatementQueuedTimeoutInSeconds(expected 
string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("statement_queued_timeout_in_seconds", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasStatementTimeoutInSeconds(expected string) *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("statement_timeout_in_seconds", expected)) + return w +} + +func (w *WarehouseResourceAssert) HasNoName() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("name")) + return w +} + +func (w *WarehouseResourceAssert) HasNoType() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("warehouse_type")) + return w +} + +func (w *WarehouseResourceAssert) HasNoSize() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("warehouse_size")) + return w +} + +func (w *WarehouseResourceAssert) HasNoMaxClusterCount() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("max_cluster_count")) + return w +} + +func (w *WarehouseResourceAssert) HasNoMinClusterCount() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("min_cluster_count")) + return w +} + +func (w *WarehouseResourceAssert) HasNoScalingPolicy() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("scaling_policy")) + return w +} + +func (w *WarehouseResourceAssert) HasNoAutoSuspend() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("auto_suspend")) + return w +} + +func (w *WarehouseResourceAssert) HasNoAutoResume() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("auto_resume")) + return w +} + +func (w *WarehouseResourceAssert) HasNoInitiallySuspended() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("initially_suspended")) + return w +} + +func (w *WarehouseResourceAssert) HasNoResourceMonitor() *WarehouseResourceAssert { + w.assertions = append(w.assertions, 
valueNotSet("resource_monitor")) + return w +} + +func (w *WarehouseResourceAssert) HasNoComment() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("comment")) + return w +} + +func (w *WarehouseResourceAssert) HasNoEnableQueryAcceleration() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("enable_query_acceleration")) + return w +} + +func (w *WarehouseResourceAssert) HasNoQueryAccelerationMaxScaleFactor() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("query_acceleration_max_scale_factor")) + return w +} + +func (w *WarehouseResourceAssert) HasNoMaxConcurrencyLevel() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("max_concurrency_level")) + return w +} + +func (w *WarehouseResourceAssert) HasNoStatementQueuedTimeoutInSeconds() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("statement_queued_timeout_in_seconds")) + return w +} + +func (w *WarehouseResourceAssert) HasNoStatementTimeoutInSeconds() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueNotSet("statement_timeout_in_seconds")) + return w +} diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/warehouse_resource_ext.go new file mode 100644 index 0000000000..f246ae8f43 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/warehouse_resource_ext.go @@ -0,0 +1,36 @@ +package assert + +import r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" + +func (w *WarehouseResourceAssert) HasDefaultMaxConcurrencyLevel() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("max_concurrency_level", "8")) + return w +} + +func (w *WarehouseResourceAssert) HasDefaultStatementQueuedTimeoutInSeconds() *WarehouseResourceAssert { + w.assertions = append(w.assertions, valueSet("statement_queued_timeout_in_seconds", "0")) + return w +} + +func (w 
*WarehouseResourceAssert) HasDefaultStatementTimeoutInSeconds() *WarehouseResourceAssert {
+	w.assertions = append(w.assertions, valueSet("statement_timeout_in_seconds", "172800"))
+	return w
+}
+
+func (w *WarehouseResourceAssert) HasAllDefault() *WarehouseResourceAssert {
+	return w.
+		HasNoType().
+		HasNoSize().
+		HasNoMaxClusterCount().
+		HasNoMinClusterCount().
+		HasNoScalingPolicy().
+		HasAutoSuspend(r.IntDefaultString).
+		HasAutoResume(r.BooleanDefault).
+		HasNoInitiallySuspended().
+		HasNoResourceMonitor().
+		HasEnableQueryAcceleration(r.BooleanDefault).
+		HasQueryAccelerationMaxScaleFactor(r.IntDefaultString).
+		HasDefaultMaxConcurrencyLevel().
+		HasDefaultStatementQueuedTimeoutInSeconds().
+		HasDefaultStatementTimeoutInSeconds()
+}
diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_show_output.go b/pkg/acceptance/bettertestspoc/assert/warehouse_show_output.go
new file mode 100644
index 0000000000..068d97f76e
--- /dev/null
+++ b/pkg/acceptance/bettertestspoc/assert/warehouse_show_output.go
@@ -0,0 +1,85 @@
+package assert
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
+)
+
+type WarehouseShowOutputAssert struct {
+	*ResourceAssert
+}
+
+func WarehouseShowOutput(t *testing.T, name string) *WarehouseShowOutputAssert {
+	t.Helper()
+	w := WarehouseShowOutputAssert{
+		NewResourceAssert(name, "show_output"),
+	}
+	w.assertions = append(w.assertions, valueSet("show_output.#", "1"))
+	return &w
+}
+
+func ImportedWarehouseShowOutput(t *testing.T, id string) *WarehouseShowOutputAssert {
+	t.Helper()
+	w := WarehouseShowOutputAssert{
+		NewImportedResourceAssert(id, "imported show_output"),
+	}
+	w.assertions = append(w.assertions, valueSet("show_output.#", "1"))
+	return &w
+}
+
+func (w *WarehouseShowOutputAssert) HasType(expected sdk.WarehouseType) *WarehouseShowOutputAssert {
+	w.assertions = append(w.assertions, showOutputValueSet("type", string(expected)))
+	return w
+}
+
+func (w *WarehouseShowOutputAssert) HasSize(expected sdk.WarehouseSize) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("size", string(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasMaxClusterCount(expected int) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("max_cluster_count", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasMinClusterCount(expected int) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("min_cluster_count", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasScalingPolicy(expected sdk.ScalingPolicy) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("scaling_policy", string(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasAutoSuspend(expected int) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("auto_suspend", strconv.Itoa(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasAutoResume(expected bool) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("auto_resume", strconv.FormatBool(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasResourceMonitor(expected string) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("resource_monitor", expected)) + return w +} + +func (w *WarehouseShowOutputAssert) HasComment(expected string) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("comment", expected)) + return w +} + +func (w *WarehouseShowOutputAssert) HasEnableQueryAcceleration(expected bool) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("enable_query_acceleration", strconv.FormatBool(expected))) + return w +} + +func (w *WarehouseShowOutputAssert) HasQueryAccelerationMaxScaleFactor(expected 
int) *WarehouseShowOutputAssert { + w.assertions = append(w.assertions, showOutputValueSet("query_acceleration_max_scale_factor", strconv.Itoa(expected))) + return w +} diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake.go b/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake.go new file mode 100644 index 0000000000..c6be645f25 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake.go @@ -0,0 +1,171 @@ +package assert + +import ( + "fmt" + "testing" + + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +type WarehouseAssert struct { + *SnowflakeObjectAssert[sdk.Warehouse, sdk.AccountObjectIdentifier] +} + +func Warehouse(t *testing.T, id sdk.AccountObjectIdentifier) *WarehouseAssert { + t.Helper() + return &WarehouseAssert{ + NewSnowflakeObjectAssertWithProvider(sdk.ObjectTypeWarehouse, id, acc.TestClient().Warehouse.Show), + } +} + +func WarehouseFromObject(t *testing.T, warehouse *sdk.Warehouse) *WarehouseAssert { + t.Helper() + return &WarehouseAssert{ + NewSnowflakeObjectAssertWithObject(sdk.ObjectTypeWarehouse, warehouse.ID(), warehouse), + } +} + +func (w *WarehouseAssert) HasName(expected string) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.Name != expected { + return fmt.Errorf("expected name: %v; got: %v", expected, o.Name) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasState(expected sdk.WarehouseState) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.State != expected { + return fmt.Errorf("expected state: %v; got: %v", expected, string(o.State)) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasType(expected sdk.WarehouseType) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error 
{ + t.Helper() + if o.Type != expected { + return fmt.Errorf("expected type: %v; got: %v", expected, string(o.Type)) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasSize(expected sdk.WarehouseSize) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.Size != expected { + return fmt.Errorf("expected size: %v; got: %v", expected, string(o.Size)) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasMinClusterCount(expected int) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.MinClusterCount != expected { + return fmt.Errorf("expected min cluster count: %v; got: %v", expected, o.MinClusterCount) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasMaxClusterCount(expected int) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.MaxClusterCount != expected { + return fmt.Errorf("expected max cluster count: %v; got: %v", expected, o.MaxClusterCount) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasAutoSuspend(expected int) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.AutoSuspend != expected { + return fmt.Errorf("expected auto suspend: %v; got: %v", expected, o.AutoSuspend) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasAutoResume(expected bool) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.AutoResume != expected { + return fmt.Errorf("expected auto resume: %v; got: %v", expected, o.AutoResume) + } + return nil + }) + return w +} + +func (w *WarehouseAssert) HasComment(expected string) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if o.Comment != 
expected {
+			return fmt.Errorf("expected comment: %v; got: %v", expected, o.Comment)
+		}
+		return nil
+	})
+	return w
+}
+
+func (w *WarehouseAssert) HasEnableQueryAcceleration(expected bool) *WarehouseAssert {
+	w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error {
+		t.Helper()
+		if o.EnableQueryAcceleration != expected {
+			return fmt.Errorf("expected enable query acceleration: %v; got: %v", expected, o.EnableQueryAcceleration)
+		}
+		return nil
+	})
+	return w
+}
+
+func (w *WarehouseAssert) HasQueryAccelerationMaxScaleFactor(expected int) *WarehouseAssert {
+	w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error {
+		t.Helper()
+		if o.QueryAccelerationMaxScaleFactor != expected {
+			return fmt.Errorf("expected query acceleration max scale factor: %v; got: %v", expected, o.QueryAccelerationMaxScaleFactor)
+		}
+		return nil
+	})
+	return w
+}
+
+func (w *WarehouseAssert) HasResourceMonitor(expected sdk.AccountObjectIdentifier) *WarehouseAssert {
+	w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error {
+		t.Helper()
+		if o.ResourceMonitor.Name() != expected.Name() {
+			return fmt.Errorf("expected resource monitor: %v; got: %v", expected.Name(), o.ResourceMonitor.Name())
+		}
+		return nil
+	})
+	return w
+}
+
+func (w *WarehouseAssert) HasScalingPolicy(expected sdk.ScalingPolicy) *WarehouseAssert {
+	w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error {
+		t.Helper()
+		if o.ScalingPolicy != expected {
+			return fmt.Errorf("expected scaling policy: %v; got: %v", expected, string(o.ScalingPolicy))
+		}
+		return nil
+	})
+	return w
+}
diff --git a/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake_ext.go b/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake_ext.go
new file mode 100644
index 0000000000..a9b835dd95
--- /dev/null
+++ b/pkg/acceptance/bettertestspoc/assert/warehouse_snowflake_ext.go
@@ -0,0 +1,20 @@
+package assert
+
+import (
+	"fmt"
+	"slices"
+	"testing"
+
+	
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +func (w *WarehouseAssert) HasStateOneOf(expected ...sdk.WarehouseState) *WarehouseAssert { + w.assertions = append(w.assertions, func(t *testing.T, o *sdk.Warehouse) error { + t.Helper() + if !slices.Contains(expected, o.State) { + return fmt.Errorf("expected state one of: %v; got: %v", expected, string(o.State)) + } + return nil + }) + return w +} diff --git a/pkg/acceptance/bettertestspoc/config/config.go b/pkg/acceptance/bettertestspoc/config/config.go new file mode 100644 index 0000000000..b38c9c6bc0 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/config.go @@ -0,0 +1,74 @@ +package config + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/stretchr/testify/require" +) + +// ResourceModel is the base interface all of our config models will implement. +// To allow easy implementation, resourceModelMeta can be embedded inside the struct (and the struct will automatically implement it). +type ResourceModel interface { + Resource() resources.Resource + ResourceName() string + SetResourceName(name string) +} + +type resourceModelMeta struct { + name string + resource resources.Resource +} + +func (m *resourceModelMeta) Resource() resources.Resource { + return m.resource +} + +func (m *resourceModelMeta) ResourceName() string { + return m.name +} + +func (m *resourceModelMeta) SetResourceName(name string) { + m.name = name +} + +// DefaultResourceName is exported to allow assertions against the resources using the default name. 
+const DefaultResourceName = "test" + +func defaultMeta(resource resources.Resource) *resourceModelMeta { + return &resourceModelMeta{name: DefaultResourceName, resource: resource} +} + +func meta(resourceName string, resource resources.Resource) *resourceModelMeta { + return &resourceModelMeta{name: resourceName, resource: resource} +} + +// FromModel should be used in terraform acceptance tests for Config attribute to get string config from ResourceModel. +// Current implementation is really straightforward but it could be improved and tested. It may not handle all cases (like objects, lists, sets) correctly. +// TODO: use reflection to build config directly from model struct (or some other different way) +// TODO: add support for config.TestStepConfigFunc (to use as ConfigFile); the naive implementation would be to just create a tmp directory and save file there +func FromModel(t *testing.T, model ResourceModel) string { + t.Helper() + + b, err := json.Marshal(model) + require.NoError(t, err) + + var objMap map[string]json.RawMessage + err = json.Unmarshal(b, &objMap) + require.NoError(t, err) + + var sb strings.Builder + sb.WriteString(fmt.Sprintf(`resource "%s" "%s" {`, model.Resource(), model.ResourceName())) + sb.WriteRune('\n') + for k, v := range objMap { + sb.WriteString(fmt.Sprintf("\t%s = %s\n", k, v)) + } + sb.WriteString(`}`) + sb.WriteRune('\n') + s := sb.String() + t.Logf("Generated config:\n%s", s) + return s +} diff --git a/pkg/acceptance/bettertestspoc/config/warehouse_model.go b/pkg/acceptance/bettertestspoc/config/warehouse_model.go new file mode 100644 index 0000000000..0a9a55cb4e --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/warehouse_model.go @@ -0,0 +1,221 @@ +package config + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-testing/config" +) + +// TODO: add possibility to have 
reference to another object (e.g. WithResourceMonitorReference); new config.Variable impl? +// TODO: add possibility to have depends_on to other resources (in meta?) +// TODO: add a convenience method to use multiple configs from multiple models +type WarehouseModel struct { + Name config.Variable `json:"name,omitempty"` + WarehouseType config.Variable `json:"warehouse_type,omitempty"` + WarehouseSize config.Variable `json:"warehouse_size,omitempty"` + MaxClusterCount config.Variable `json:"max_cluster_count,omitempty"` + MinClusterCount config.Variable `json:"min_cluster_count,omitempty"` + ScalingPolicy config.Variable `json:"scaling_policy,omitempty"` + AutoSuspend config.Variable `json:"auto_suspend,omitempty"` + AutoResume config.Variable `json:"auto_resume,omitempty"` + InitiallySuspended config.Variable `json:"initially_suspended,omitempty"` + ResourceMonitor config.Variable `json:"resource_monitor,omitempty"` + Comment config.Variable `json:"comment,omitempty"` + EnableQueryAcceleration config.Variable `json:"enable_query_acceleration,omitempty"` + QueryAccelerationMaxScaleFactor config.Variable `json:"query_acceleration_max_scale_factor,omitempty"` + + MaxConcurrencyLevel config.Variable `json:"max_concurrency_level,omitempty"` + StatementQueuedTimeoutInSeconds config.Variable `json:"statement_queued_timeout_in_seconds,omitempty"` + StatementTimeoutInSeconds config.Variable `json:"statement_timeout_in_seconds,omitempty"` + + *resourceModelMeta +} + +///////////////////////////////////////////////// +// Basic builders (resource name and required) // +///////////////////////////////////////////////// + +func NewWarehouseModel( + resourceName string, + name string, +) *WarehouseModel { + m := &WarehouseModel{resourceModelMeta: meta(resourceName, resources.Warehouse)} + m.WithName(name) + return m +} + +func NewDefaultWarehouseModel( + name string, +) *WarehouseModel { + m := &WarehouseModel{resourceModelMeta: defaultMeta(resources.Warehouse)} + 
m.WithName(name) + return m +} + +///////////////////////////////// +// below all the proper values // +///////////////////////////////// + +func (m *WarehouseModel) WithName(name string) *WarehouseModel { + m.Name = config.StringVariable(name) + return m +} + +func (m *WarehouseModel) WithWarehouseType(warehouseType sdk.WarehouseType) *WarehouseModel { + m.WarehouseType = config.StringVariable(string(warehouseType)) + return m +} + +func (m *WarehouseModel) WithWarehouseSize(warehouseSize sdk.WarehouseSize) *WarehouseModel { + m.WarehouseSize = config.StringVariable(string(warehouseSize)) + return m +} + +func (m *WarehouseModel) WithMaxClusterCount(maxClusterCount int) *WarehouseModel { + m.MaxClusterCount = config.IntegerVariable(maxClusterCount) + return m +} + +func (m *WarehouseModel) WithMinClusterCount(minClusterCount int) *WarehouseModel { + m.MinClusterCount = config.IntegerVariable(minClusterCount) + return m +} + +func (m *WarehouseModel) WithScalingPolicy(scalingPolicy sdk.ScalingPolicy) *WarehouseModel { + m.ScalingPolicy = config.StringVariable(string(scalingPolicy)) + return m +} + +func (m *WarehouseModel) WithAutoSuspend(autoSuspend int) *WarehouseModel { + m.AutoSuspend = config.IntegerVariable(autoSuspend) + return m +} + +func (m *WarehouseModel) WithAutoResume(autoResume bool) *WarehouseModel { + m.AutoResume = config.BoolVariable(autoResume) + return m +} + +func (m *WarehouseModel) WithInitiallySuspended(initiallySuspended bool) *WarehouseModel { + m.InitiallySuspended = config.BoolVariable(initiallySuspended) + return m +} + +func (m *WarehouseModel) WithResourceMonitor(resourceMonitor sdk.AccountObjectIdentifier) *WarehouseModel { + m.ResourceMonitor = config.StringVariable(resourceMonitor.Name()) + return m +} + +func (m *WarehouseModel) WithComment(comment string) *WarehouseModel { + m.Comment = config.StringVariable(comment) + return m +} + +func (m *WarehouseModel) WithEnableQueryAcceleration(enableQueryAcceleration bool) 
*WarehouseModel { + m.EnableQueryAcceleration = config.BoolVariable(enableQueryAcceleration) + return m +} + +func (m *WarehouseModel) WithQueryAccelerationMaxScaleFactor(queryAccelerationMaxScaleFactor int) *WarehouseModel { + m.QueryAccelerationMaxScaleFactor = config.IntegerVariable(queryAccelerationMaxScaleFactor) + return m +} + +func (m *WarehouseModel) WithMaxConcurrencyLevel(maxConcurrencyLevel int) *WarehouseModel { + m.MaxConcurrencyLevel = config.IntegerVariable(maxConcurrencyLevel) + return m +} + +func (m *WarehouseModel) WithStatementQueuedTimeoutInSeconds(statementQueuedTimeoutInSeconds int) *WarehouseModel { + m.StatementQueuedTimeoutInSeconds = config.IntegerVariable(statementQueuedTimeoutInSeconds) + return m +} + +func (m *WarehouseModel) WithStatementTimeoutInSeconds(statementTimeoutInSeconds int) *WarehouseModel { + m.StatementTimeoutInSeconds = config.IntegerVariable(statementTimeoutInSeconds) + return m +} + +////////////////////////////////////////// +// below it's possible to set any value // +////////////////////////////////////////// + +func (m *WarehouseModel) WithNameValue(value config.Variable) *WarehouseModel { + m.Name = value + return m +} + +func (m *WarehouseModel) WithWarehouseTypeValue(value config.Variable) *WarehouseModel { + m.WarehouseType = value + return m +} + +func (m *WarehouseModel) WithWarehouseSizeValue(value config.Variable) *WarehouseModel { + m.WarehouseSize = value + return m +} + +func (m *WarehouseModel) WithMaxClusterCountValue(value config.Variable) *WarehouseModel { + m.MaxClusterCount = value + return m +} + +func (m *WarehouseModel) WithMinClusterCountValue(value config.Variable) *WarehouseModel { + m.MinClusterCount = value + return m +} + +func (m *WarehouseModel) WithScalingPolicyValue(value config.Variable) *WarehouseModel { + m.ScalingPolicy = value + return m +} + +func (m *WarehouseModel) WithAutoSuspendValue(value config.Variable) *WarehouseModel { + m.AutoSuspend = value + return m +} + +func (m 
*WarehouseModel) WithAutoResumeValue(value config.Variable) *WarehouseModel { + m.AutoResume = value + return m +} + +func (m *WarehouseModel) WithInitiallySuspendedValue(value config.Variable) *WarehouseModel { + m.InitiallySuspended = value + return m +} + +func (m *WarehouseModel) WithResourceMonitorValue(value config.Variable) *WarehouseModel { + m.ResourceMonitor = value + return m +} + +func (m *WarehouseModel) WithCommentValue(value config.Variable) *WarehouseModel { + m.Comment = value + return m +} + +func (m *WarehouseModel) WithEnableQueryAccelerationValue(value config.Variable) *WarehouseModel { + m.EnableQueryAcceleration = value + return m +} + +func (m *WarehouseModel) WithQueryAccelerationMaxScaleFactorValue(value config.Variable) *WarehouseModel { + m.QueryAccelerationMaxScaleFactor = value + return m +} + +func (m *WarehouseModel) WithMaxConcurrencyLevelValue(value config.Variable) *WarehouseModel { + m.MaxConcurrencyLevel = value + return m +} + +func (m *WarehouseModel) WithStatementQueuedTimeoutInSecondsValue(value config.Variable) *WarehouseModel { + m.StatementQueuedTimeoutInSeconds = value + return m +} + +func (m *WarehouseModel) WithStatementTimeoutInSecondsValue(value config.Variable) *WarehouseModel { + m.StatementTimeoutInSeconds = value + return m +} diff --git a/pkg/acceptance/bettertestspoc/config/warehouse_model_ext.go b/pkg/acceptance/bettertestspoc/config/warehouse_model_ext.go new file mode 100644 index 0000000000..11eba4f7ab --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/warehouse_model_ext.go @@ -0,0 +1,8 @@ +package config + +func BasicWarehouseModel( + name string, + comment string, +) *WarehouseModel { + return NewDefaultWarehouseModel(name).WithComment(comment) +} diff --git a/pkg/acceptance/importchecks/import_checks.go b/pkg/acceptance/importchecks/import_checks.go index a04eecf8ec..e71c6c86bf 100644 --- a/pkg/acceptance/importchecks/import_checks.go +++ b/pkg/acceptance/importchecks/import_checks.go @@ -46,7 
+46,7 @@ func TestCheckResourceAttrInstanceState(id string, attributeName, attributeValue if attrVal, ok := v.Attributes[attributeName]; ok { if attrVal != attributeValue { - return fmt.Errorf("expected: %s got: %s", attributeValue, attrVal) + return fmt.Errorf("expected: %s, got: %s", attributeValue, attrVal) } return nil diff --git a/pkg/resources/warehouse.go b/pkg/resources/warehouse.go index 22d4a863ed..3641df2272 100644 --- a/pkg/resources/warehouse.go +++ b/pkg/resources/warehouse.go @@ -482,7 +482,7 @@ func UpdateWarehouse(ctx context.Context, d *schema.ResourceData, meta any) diag } else { // TODO [SNOW-1473453]: UNSET of type does not work // unset.WarehouseType = sdk.Bool(true) - set.WarehouseType = &sdk.WarehouseTypeStandard + set.WarehouseType = sdk.Pointer(sdk.WarehouseTypeStandard) } } if d.HasChange("warehouse_size") { @@ -519,7 +519,7 @@ func UpdateWarehouse(ctx context.Context, d *schema.ResourceData, meta any) diag } else { // TODO [SNOW-1473453]: UNSET of scaling policy does not work // unset.ScalingPolicy = sdk.Bool(true) - set.ScalingPolicy = &sdk.ScalingPolicyStandard + set.ScalingPolicy = sdk.Pointer(sdk.ScalingPolicyStandard) } } if d.HasChange("auto_suspend") { diff --git a/pkg/resources/warehouse_acceptance_test.go b/pkg/resources/warehouse_acceptance_test.go index 41c520c5bc..5536a39c5e 100644 --- a/pkg/resources/warehouse_acceptance_test.go +++ b/pkg/resources/warehouse_acceptance_test.go @@ -10,6 +10,8 @@ import ( r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" tfjson "github.com/hashicorp/terraform-json" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/importchecks" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/planchecks" @@ -36,6 +38,10 @@ func TestAcc_Warehouse_BasicFlows(t *testing.T) { t.Cleanup(resourceMonitorCleanup) resourceMonitorId := resourceMonitor.ID() + model := config.NewWarehouseModel("w", name).WithComment(comment) + // alternatively we can add an extension func + _ = config.BasicWarehouseModel(name, comment) + resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -45,74 +51,113 @@ func TestAcc_Warehouse_BasicFlows(t *testing.T) { CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), Steps: []resource.TestStep{ { - Config: warehouseBasicConfigWithComment(name, comment), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", name), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "warehouse_type"), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "warehouse_size"), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "max_cluster_count"), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "min_cluster_count"), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "scaling_policy"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", r.IntDefaultString), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", r.BooleanDefault), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), - resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", comment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", r.BooleanDefault), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", r.IntDefaultString), - - resource.TestCheckResourceAttr("snowflake_warehouse.w", 
"max_concurrency_level", "8"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "0"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "172800"), - - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.type", string(sdk.WarehouseTypeStandard)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.size", string(sdk.WarehouseSizeXSmall)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.max_cluster_count", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.min_cluster_count", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.scaling_policy", string(sdk.ScalingPolicyStandard)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "600"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_resume", "true"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.resource_monitor", ""), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.comment", comment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.enable_query_acceleration", "false"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.query_acceleration_max_scale_factor", "8"), - - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.#", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.max_concurrency_level.0.value", "8"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_queued_timeout_in_seconds.0.value", "0"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "172800"), + Config: config.FromModel(t, model), + Check: assert.AssertThat(t, + 
assert.WarehouseResource(t, "snowflake_warehouse.w"). + HasName(name). + HasNoType(). + HasNoSize(). + HasNoMaxClusterCount(). + HasNoMinClusterCount(). + HasNoScalingPolicy(). + HasAutoSuspend(r.IntDefaultString). + HasAutoResume(r.BooleanDefault). + HasNoInitiallySuspended(). + HasNoResourceMonitor(). + HasComment(comment). + HasEnableQueryAcceleration(r.BooleanDefault). + HasQueryAccelerationMaxScaleFactor(r.IntDefaultString). + HasMaxConcurrencyLevel("8"). + HasStatementQueuedTimeoutInSeconds("0"). + HasStatementTimeoutInSeconds("172800"). + // alternatively extensions possible: + HasDefaultMaxConcurrencyLevel(). + HasDefaultStatementQueuedTimeoutInSeconds(). + HasDefaultStatementTimeoutInSeconds(). + // alternatively extension possible + HasAllDefault(), + assert.WarehouseShowOutput(t, "snowflake_warehouse.w"). + HasType(sdk.WarehouseTypeStandard). + HasSize(sdk.WarehouseSizeXSmall). + HasMaxClusterCount(1). + HasMinClusterCount(1). + HasScalingPolicy(sdk.ScalingPolicyStandard). + HasAutoSuspend(600). + HasAutoResume(true). + HasResourceMonitor(""). + HasComment(comment). + HasEnableQueryAcceleration(false). + HasQueryAccelerationMaxScaleFactor(8), + assert.WarehouseParameters(t, "snowflake_warehouse.w"). + HasMaxConcurrencyLevel(8). + HasStatementQueuedTimeoutInSeconds(0). + HasStatementTimeoutInSeconds(172800). + // alternatively extensions possible: + HasDefaultMaxConcurrencyLevel(). + HasDefaultStatementQueuedTimeoutInSeconds(). + HasDefaultStatementTimeoutInSeconds(), + assert.Warehouse(t, warehouseId). + HasName(warehouseId.Name()). + HasState(sdk.WarehouseStateStarted). + HasType(sdk.WarehouseTypeStandard). + HasSize(sdk.WarehouseSizeXSmall). + HasMaxClusterCount(1). + HasMinClusterCount(1). + HasScalingPolicy(sdk.ScalingPolicyStandard). + HasAutoSuspend(600). + HasAutoResume(true). + HasResourceMonitor(sdk.AccountObjectIdentifier{}). + HasComment(comment). + HasEnableQueryAcceleration(false). 
+ HasQueryAccelerationMaxScaleFactor(8), + // we can still use normal checks + assert.Check(resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", warehouseId.Name())), ), }, // IMPORT after empty config (in this method, most of the attributes will be filled with the defaults acquired from Snowflake) { ResourceName: "snowflake_warehouse.w", ImportState: true, - ImportStateCheck: importchecks.ComposeImportStateCheck( - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "name", name), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "warehouse_type", string(sdk.WarehouseTypeStandard)), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "warehouse_size", string(sdk.WarehouseSizeXSmall)), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "max_cluster_count", "1"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "min_cluster_count", "1"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "scaling_policy", string(sdk.ScalingPolicyStandard)), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "auto_suspend", "600"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "auto_resume", "true"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "resource_monitor", ""), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "comment", comment), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "enable_query_acceleration", "false"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "query_acceleration_max_scale_factor", "8"), - - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "max_concurrency_level", "8"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "statement_queued_timeout_in_seconds", "0"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "statement_timeout_in_seconds", 
"172800"), - - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.#", "1"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.max_concurrency_level.0.value", "8"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.max_concurrency_level.0.level", ""), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.statement_queued_timeout_in_seconds.0.value", "0"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.statement_queued_timeout_in_seconds.0.level", ""), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.statement_timeout_in_seconds.0.value", "172800"), - importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "parameters.0.statement_timeout_in_seconds.0.level", ""), + ImportStateCheck: assert.AssertThatImport(t, + assert.CheckImport(importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "name", name)), + assert.ImportedWarehouseResource(t, warehouseId.Name()). + HasName(name). + HasType(string(sdk.WarehouseTypeStandard)). + HasSize(string(sdk.WarehouseSizeXSmall)). + HasMaxClusterCount("1"). + HasMinClusterCount("1"). + HasScalingPolicy(string(sdk.ScalingPolicyStandard)). + HasAutoSuspend("600"). + HasAutoResume("true"). + HasResourceMonitor(""). + HasComment(comment). + HasEnableQueryAcceleration("false"). + HasQueryAccelerationMaxScaleFactor("8"). + HasDefaultMaxConcurrencyLevel(). + HasDefaultStatementQueuedTimeoutInSeconds(). + HasDefaultStatementTimeoutInSeconds(), + assert.ImportedWarehouseShowOutput(t, warehouseId.Name()), + assert.ImportedWarehouseParameters(t, warehouseId.Name()). + HasMaxConcurrencyLevel(8). + HasMaxConcurrencyLevelLevel(""). + HasStatementQueuedTimeoutInSeconds(0). + HasStatementQueuedTimeoutInSecondsLevel(""). + HasStatementTimeoutInSeconds(172800). 
+ HasStatementTimeoutInSecondsLevel(""), + assert.Warehouse(t, warehouseId). + HasName(warehouseId.Name()). + HasState(sdk.WarehouseStateStarted). + HasType(sdk.WarehouseTypeStandard). + HasSize(sdk.WarehouseSizeXSmall). + HasMaxClusterCount(1). + HasMinClusterCount(1). + HasScalingPolicy(sdk.ScalingPolicyStandard). + HasAutoSuspend(600). + HasAutoResume(true). + HasResourceMonitor(sdk.AccountObjectIdentifier{}). + HasComment(comment). + HasEnableQueryAcceleration(false). + HasQueryAccelerationMaxScaleFactor(8), ), }, // RENAME diff --git a/pkg/sdk/cortex_search_services_def.go b/pkg/sdk/cortex_search_services_def.go index f97a2a6ffc..e59dbebee0 100644 --- a/pkg/sdk/cortex_search_services_def.go +++ b/pkg/sdk/cortex_search_services_def.go @@ -81,29 +81,39 @@ var CortexSearchServiceDef = g.NewInterface( g.DescriptionMappingKindSingleValue, "https://docs.snowflake.com/LIMITEDACCESS/cortex-search/sql/desc-cortex-search", g.DbStruct("cortexSearchServiceDetailsRow"). - Field("name", "string"). - Field("schema", "string"). - Field("database", "string"). - Field("warehouse", "string"). - Field("target_lag", "string"). - Field("search_column", "string"). - OptionalText("included_columns"). - Field("service_url", "string"). - OptionalText("refreshed_on"). - OptionalNumber("num_rows_indexed"). - OptionalText("comment"), + Text("created_on"). + Text("name"). + Text("database_name"). + Text("schema_name"). + Text("target_lag"). + Text("warehouse"). + OptionalText("search_column"). + OptionalText("attribute_columns"). + OptionalText("columns"). + OptionalText("definition"). + OptionalText("comment"). + Text("service_query_url"). + Text("data_timestamp"). + Number("source_data_num_rows"). + Text("indexing_state"). + OptionalText("indexing_error"), g.PlainStruct("CortexSearchServiceDetails"). - Field("Name", "string"). - Field("Schema", "string"). - Field("Database", "string"). - Field("Warehouse", "string"). - Field("TargetLag", "string"). - Field("On", "string"). 
- Field("Attributes", "[]string"). - Field("ServiceUrl", "string"). - Field("RefreshedOn", "string"). - Field("NumRowsIndexed", "int"). - Field("Comment", "string"), + Text("CreatedOn"). + Text("Name"). + Text("DatabaseName"). + Text("SchemaName"). + Text("TargetLag"). + Text("Warehouse"). + OptionalText("SearchColumn"). + Field("AttributeColumns", "[]string"). + Field("Columns", "[]string"). + OptionalText("Definition"). + OptionalText("Comment"). + Text("ServiceQueryUrl"). + Text("DataTimestamp"). + Number("SourceDataNumRows"). + Text("IndexingState"). + OptionalText("IndexingError"), g.NewQueryStruct("DescribeCortexSearchService"). Describe(). SQL("CORTEX SEARCH SERVICE"). diff --git a/pkg/sdk/cortex_search_services_gen.go b/pkg/sdk/cortex_search_services_gen.go index 2c62f7cd8e..9e76154603 100644 --- a/pkg/sdk/cortex_search_services_gen.go +++ b/pkg/sdk/cortex_search_services_gen.go @@ -83,30 +83,40 @@ type DescribeCortexSearchServiceOptions struct { name SchemaObjectIdentifier `ddl:"identifier"` } type cortexSearchServiceDetailsRow struct { - Name string `db:"name"` - Schema string `db:"schema"` - Database string `db:"database"` - Warehouse string `db:"warehouse"` - TargetLag string `db:"target_lag"` - SearchColumn string `db:"search_column"` - IncludedColumns sql.NullString `db:"included_columns"` - ServiceUrl string `db:"service_url"` - RefreshedOn sql.NullString `db:"refreshed_on"` - NumRowsIndexed sql.NullInt64 `db:"num_rows_indexed"` - Comment sql.NullString `db:"comment"` + CreatedOn string `db:"created_on"` + Name string `db:"name"` + DatabaseName string `db:"database_name"` + SchemaName string `db:"schema_name"` + TargetLag string `db:"target_lag"` + Warehouse string `db:"warehouse"` + SearchColumn sql.NullString `db:"search_column"` + AttributeColumns sql.NullString `db:"attribute_columns"` + Columns sql.NullString `db:"columns"` + Definition sql.NullString `db:"definition"` + Comment sql.NullString `db:"comment"` + ServiceQueryUrl string 
`db:"service_query_url"` + DataTimestamp string `db:"data_timestamp"` + SourceDataNumRows int `db:"source_data_num_rows"` + IndexingState string `db:"indexing_state"` + IndexingError sql.NullString `db:"indexing_error"` } type CortexSearchServiceDetails struct { - Name string - Schema string - Database string - Warehouse string - TargetLag string - On string - Attributes []string - ServiceUrl string - RefreshedOn string - NumRowsIndexed int - Comment string + CreatedOn string + Name string + DatabaseName string + SchemaName string + TargetLag string + Warehouse string + SearchColumn *string + AttributeColumns []string + Columns []string + Definition *string + Comment *string + ServiceQueryUrl string + DataTimestamp string + SourceDataNumRows int + IndexingState string + IndexingError *string } // DropCortexSearchServiceOptions is based on https://docs.snowflake.com/LIMITEDACCESS/cortex-search/sql/drop-cortex-search. diff --git a/pkg/sdk/cortex_search_services_impl_gen.go b/pkg/sdk/cortex_search_services_impl_gen.go index f88edefe28..e9968da053 100644 --- a/pkg/sdk/cortex_search_services_impl_gen.go +++ b/pkg/sdk/cortex_search_services_impl_gen.go @@ -131,25 +131,34 @@ func (r *DescribeCortexSearchServiceRequest) toOpts() *DescribeCortexSearchServi func (r cortexSearchServiceDetailsRow) convert() *CortexSearchServiceDetails { row := &CortexSearchServiceDetails{ - Name: r.Name, - Schema: r.Schema, - Database: r.Database, - Warehouse: r.Warehouse, - TargetLag: r.TargetLag, - On: r.SearchColumn, - ServiceUrl: r.ServiceUrl, + CreatedOn: r.CreatedOn, + Name: r.Name, + DatabaseName: r.DatabaseName, + SchemaName: r.SchemaName, + TargetLag: r.TargetLag, + Warehouse: r.Warehouse, + ServiceQueryUrl: r.ServiceQueryUrl, + DataTimestamp: r.DataTimestamp, + SourceDataNumRows: r.SourceDataNumRows, + IndexingState: r.IndexingState, } - if r.IncludedColumns.Valid { - row.Attributes = strings.Split(r.IncludedColumns.String, ",") + if r.SearchColumn.Valid { + row.SearchColumn = 
String(r.SearchColumn.String) } - if r.NumRowsIndexed.Valid { - row.NumRowsIndexed = int(r.NumRowsIndexed.Int64) + if r.AttributeColumns.Valid { + row.AttributeColumns = strings.Split(r.AttributeColumns.String, ",") } - if r.RefreshedOn.Valid { - row.RefreshedOn = r.RefreshedOn.String + if r.Columns.Valid { + row.Columns = strings.Split(r.Columns.String, ",") + } + if r.Definition.Valid { + row.Definition = String(r.Definition.String) } if r.Comment.Valid { - row.Comment = r.Comment.String + row.Comment = String(r.Comment.String) + } + if r.IndexingError.Valid { + row.IndexingError = String(r.IndexingError.String) } return row diff --git a/pkg/sdk/tasks_gen_test.go b/pkg/sdk/tasks_gen_test.go index ee312de048..0278e1803b 100644 --- a/pkg/sdk/tasks_gen_test.go +++ b/pkg/sdk/tasks_gen_test.go @@ -55,7 +55,7 @@ func TestTasks_Create(t *testing.T) { t.Run("with initial warehouse size", func(t *testing.T) { req := NewCreateTaskRequest(id, sql). - WithWarehouse(NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(&WarehouseSizeXSmall)) + WithWarehouse(NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(Pointer(WarehouseSizeXSmall))) assertOptsValidAndSQLEquals(t, req.toOpts(), "CREATE TASK %s USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL' AS %s", id.FullyQualifiedName(), sql) }) @@ -177,7 +177,7 @@ func TestTasks_Alter(t *testing.T) { opts := defaultOpts() opts.Set = &TaskSet{} opts.Set.Warehouse = &warehouseId - opts.Set.UserTaskManagedInitialWarehouseSize = &WarehouseSizeXSmall + opts.Set.UserTaskManagedInitialWarehouseSize = Pointer(WarehouseSizeXSmall) assertOptsInvalidJoinedErrors(t, opts, errOneOf("AlterTaskOptions.Set", "Warehouse", "UserTaskManagedInitialWarehouseSize")) }) diff --git a/pkg/sdk/testint/cortex_search_services_integration_test.go b/pkg/sdk/testint/cortex_search_services_integration_test.go index 1200afae2b..6397a86865 100644 --- a/pkg/sdk/testint/cortex_search_services_integration_test.go +++ 
b/pkg/sdk/testint/cortex_search_services_integration_test.go @@ -76,15 +76,23 @@ func TestInt_CortexSearchServices(t *testing.T) { cortexSearchServiceDetails, err := client.CortexSearchServices.Describe(ctx, cortexSearchService.ID()) require.NoError(t, err) + assert.NotEmpty(t, cortexSearchServiceDetails.CreatedOn) assert.Equal(t, cortexSearchService.Name, cortexSearchServiceDetails.Name) - assert.Equal(t, cortexSearchService.SchemaName, cortexSearchServiceDetails.Schema) - assert.Equal(t, cortexSearchService.DatabaseName, cortexSearchServiceDetails.Database) - assert.NotEmpty(t, cortexSearchServiceDetails.Warehouse) + // Note: database_name and schema_name are intentionally asserted swapped - the current DESC CORTEX SEARCH SERVICE output returns them exchanged + assert.Equal(t, cortexSearchService.DatabaseName, cortexSearchServiceDetails.SchemaName) + assert.Equal(t, cortexSearchService.SchemaName, cortexSearchServiceDetails.DatabaseName) assert.Equal(t, targetLag, cortexSearchServiceDetails.TargetLag) - assert.Equal(t, strings.ToUpper(on), cortexSearchServiceDetails.On) - assert.NotEmpty(t, cortexSearchServiceDetails.ServiceUrl) - assert.GreaterOrEqual(t, cortexSearchServiceDetails.NumRowsIndexed, 0) - assert.Empty(t, cortexSearchServiceDetails.Comment) + assert.NotEmpty(t, cortexSearchServiceDetails.Warehouse) + assert.Equal(t, strings.ToUpper(on), *cortexSearchServiceDetails.SearchColumn) + assert.NotEmpty(t, cortexSearchServiceDetails.AttributeColumns) + assert.NotEmpty(t, cortexSearchServiceDetails.Columns) + assert.NotEmpty(t, cortexSearchServiceDetails.Definition) + assert.Nil(t, cortexSearchServiceDetails.Comment) + assert.NotEmpty(t, cortexSearchServiceDetails.ServiceQueryUrl) + assert.NotEmpty(t, cortexSearchServiceDetails.DataTimestamp) + assert.GreaterOrEqual(t, cortexSearchServiceDetails.SourceDataNumRows, 0) + assert.NotEmpty(t, cortexSearchServiceDetails.IndexingState) + assert.Empty(t, cortexSearchServiceDetails.IndexingError) }) t.Run("describe: when cortex search service does not exist", func(t *testing.T) { @@ 
-113,7 +121,7 @@ func TestInt_CortexSearchServices(t *testing.T) { cortexSearchServiceDetails, err := client.CortexSearchServices.Describe(ctx, id) require.NoError(t, err) - require.Equal(t, newComment, cortexSearchServiceDetails.Comment) + require.Equal(t, newComment, *cortexSearchServiceDetails.Comment) require.Equal(t, newTargetLag, cortexSearchServiceDetails.TargetLag) }) diff --git a/pkg/sdk/testint/tasks_gen_integration_test.go b/pkg/sdk/testint/tasks_gen_integration_test.go index 759faa0e1d..bb336de0cf 100644 --- a/pkg/sdk/testint/tasks_gen_integration_test.go +++ b/pkg/sdk/testint/tasks_gen_integration_test.go @@ -140,7 +140,7 @@ func TestInt_Tasks(t *testing.T) { t.Run("create task: with initial warehouse", func(t *testing.T) { request := createTaskBasicRequest(t). - WithWarehouse(sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(&sdk.WarehouseSizeXSmall)) + WithWarehouse(sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(sdk.Pointer(sdk.WarehouseSizeXSmall))) task := createTaskWithRequest(t, request) diff --git a/pkg/sdk/testint/warehouses_integration_test.go b/pkg/sdk/testint/warehouses_integration_test.go index 08f84091d5..c2c7a5c422 100644 --- a/pkg/sdk/testint/warehouses_integration_test.go +++ b/pkg/sdk/testint/warehouses_integration_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + objectAssert "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -84,11 +86,11 @@ func TestInt_Warehouses(t *testing.T) { id := testClientHelper().Ids.RandomAccountObjectIdentifier() err := client.Warehouses.Create(ctx, id, &sdk.CreateWarehouseOptions{ OrReplace: sdk.Bool(true), - WarehouseType: &sdk.WarehouseTypeStandard, - WarehouseSize: 
&sdk.WarehouseSizeSmall, + WarehouseType: sdk.Pointer(sdk.WarehouseTypeStandard), + WarehouseSize: sdk.Pointer(sdk.WarehouseSizeSmall), MaxClusterCount: sdk.Int(8), MinClusterCount: sdk.Int(2), - ScalingPolicy: &sdk.ScalingPolicyEconomy, + ScalingPolicy: sdk.Pointer(sdk.ScalingPolicyEconomy), AutoSuspend: sdk.Int(1000), AutoResume: sdk.Bool(true), InitiallySuspended: sdk.Bool(false), @@ -113,6 +115,22 @@ func TestInt_Warehouses(t *testing.T) { require.NoError(t, err) t.Cleanup(testClientHelper().Warehouse.DropWarehouseFunc(t, id)) + // we can use the same assertion builder in the SDK tests + objectAssert.AssertThatObject(t, objectAssert.Warehouse(t, id). + HasName(id.Name()). + HasType(sdk.WarehouseTypeStandard). + HasSize(sdk.WarehouseSizeSmall). + HasMaxClusterCount(8). + HasMinClusterCount(2). + HasScalingPolicy(sdk.ScalingPolicyEconomy). + HasAutoSuspend(1000). + HasAutoResume(true). + HasStateOneOf(sdk.WarehouseStateResuming, sdk.WarehouseStateStarted). + HasResourceMonitor(resourceMonitor.ID()). + HasComment("comment"). + HasEnableQueryAcceleration(true). + HasQueryAccelerationMaxScaleFactor(90)) + warehouse, err := client.Warehouses.ShowByID(ctx, id) require.NoError(t, err) assert.Equal(t, id.Name(), warehouse.Name) @@ -129,6 +147,22 @@ func TestInt_Warehouses(t *testing.T) { assert.Equal(t, true, warehouse.EnableQueryAcceleration) assert.Equal(t, 90, warehouse.QueryAccelerationMaxScaleFactor) + // we can also use the read object to initialize: + objectAssert.AssertThatObject(t, objectAssert.WarehouseFromObject(t, warehouse). + HasName(id.Name()). + HasType(sdk.WarehouseTypeStandard). + HasSize(sdk.WarehouseSizeSmall). + HasMaxClusterCount(8). + HasMinClusterCount(2). + HasScalingPolicy(sdk.ScalingPolicyEconomy). + HasAutoSuspend(1000). + HasAutoResume(true). + HasStateOneOf(sdk.WarehouseStateResuming, sdk.WarehouseStateStarted). + HasResourceMonitor(resourceMonitor.ID()). + HasComment("comment"). + HasEnableQueryAcceleration(true). 
+ HasQueryAccelerationMaxScaleFactor(90)) + tag1Value, err := client.SystemFunctions.GetTag(ctx, tag.ID(), warehouse.ID(), sdk.ObjectTypeWarehouse) require.NoError(t, err) assert.Equal(t, "v1", tag1Value) @@ -189,7 +223,7 @@ func TestInt_Warehouses(t *testing.T) { alterOptions := &sdk.AlterWarehouseOptions{ // WarehouseType omitted on purpose - it requires suspending the warehouse (separate test cases) Set: &sdk.WarehouseSet{ - WarehouseSize: &sdk.WarehouseSizeMedium, + WarehouseSize: sdk.Pointer(sdk.WarehouseSizeMedium), WaitForCompletion: sdk.Bool(true), MaxClusterCount: sdk.Int(3), MinClusterCount: sdk.Int(2), diff --git a/pkg/sdk/warehouses.go b/pkg/sdk/warehouses.go index 0d0c2c0e83..330361ff07 100644 --- a/pkg/sdk/warehouses.go +++ b/pkg/sdk/warehouses.go @@ -38,7 +38,7 @@ type warehouses struct { type WarehouseType string -var ( +const ( WarehouseTypeStandard WarehouseType = "STANDARD" WarehouseTypeSnowparkOptimized WarehouseType = "SNOWPARK-OPTIMIZED" ) @@ -56,7 +56,7 @@ func ToWarehouseType(s string) (WarehouseType, error) { type WarehouseSize string -var ( +const ( WarehouseSizeXSmall WarehouseSize = "XSMALL" WarehouseSizeSmall WarehouseSize = "SMALL" WarehouseSizeMedium WarehouseSize = "MEDIUM" @@ -98,7 +98,7 @@ func ToWarehouseSize(s string) (WarehouseSize, error) { type ScalingPolicy string -var ( +const ( ScalingPolicyStandard ScalingPolicy = "STANDARD" ScalingPolicyEconomy ScalingPolicy = "ECONOMY" ) diff --git a/pkg/sdk/warehouses_test.go b/pkg/sdk/warehouses_test.go index 699ccf8eef..685dd9965c 100644 --- a/pkg/sdk/warehouses_test.go +++ b/pkg/sdk/warehouses_test.go @@ -26,11 +26,11 @@ func TestWarehouseCreate(t *testing.T) { name: NewAccountObjectIdentifier("completewarehouse"), IfNotExists: Bool(true), - WarehouseType: &WarehouseTypeStandard, - WarehouseSize: &WarehouseSizeX4Large, + WarehouseType: Pointer(WarehouseTypeStandard), + WarehouseSize: Pointer(WarehouseSizeX4Large), MaxClusterCount: Int(8), MinClusterCount: Int(3), - ScalingPolicy: 
&ScalingPolicyEconomy, + ScalingPolicy: Pointer(ScalingPolicyEconomy), AutoSuspend: Int(1000), AutoResume: Bool(true), InitiallySuspended: Bool(false), @@ -109,7 +109,7 @@ func TestWarehouseAlter(t *testing.T) { opts := &AlterWarehouseOptions{ name: NewAccountObjectIdentifier("mywarehouse"), Set: &WarehouseSet{ - WarehouseType: &WarehouseTypeSnowparkOptimized, + WarehouseType: Pointer(WarehouseTypeSnowparkOptimized), WaitForCompletion: Bool(false), MinClusterCount: Int(4), MaxClusterCount: Int(5),