diff --git a/docs/configuration-reference/platforms/packet.md b/docs/configuration-reference/platforms/packet.md
index 48b391ef9..151c29633 100644
--- a/docs/configuration-reference/platforms/packet.md
+++ b/docs/configuration-reference/platforms/packet.md
@@ -199,63 +199,62 @@ node_type = var.custom_default_worker_type
 
 ## Attribute reference
 
-| Argument | Description | Default | Type | Required |
-|----------|-------------|:-------:|:----:|:--------:|
-| `auth_token` | Packet Auth token. Use the `PACKET_AUTH_TOKEN` environment variable instead. | - | string | false |
-| `asset_dir` | Location where Lokomotive stores cluster assets. | - | string | true |
-| `cluster_name` | Name of the cluster. | - | string | true |
-| `tags` | List of tags that will be propagated to master nodes. | - | map(string) | false |
-| `controller_count` | Number of controller nodes. | 1 | number | false |
-| `controller_type` | Packet instance type for controllers. Changing this field on existing cluster will be ignored. To actually apply it, cluster needs to be destroyed and re-created. | "c3.small.x86" | string | false |
-| `controller_clc_snippets` | Controller Flatcar Container Linux Config snippets. | [] | list(string) | false |
-| `dns` | DNS configuration block. | - | object | true |
-| `dns.zone` | A DNS zone to use for the cluster. The following format is used for cluster-related DNS records: `..` | - | string | true |
-| `dns.provider` | DNS provider to use for the cluster. Valid values: `cloudflare`, `route53`, `manual`. | - | string | true |
-| `oidc` | OIDC configuration block. | - | object | false |
-| `oidc.issuer_url` | URL of the provider which allows the API server to discover public signing keys. Only URLs which use the https:// scheme are accepted. | - | string | false |
-| `oidc.client_id` | A client id that all tokens must be issued for. | "clusterauth" | string | false |
-| `oidc.username_claim` | JWT claim to use as the user name. | "email" | string | false |
-| `oidc.groups_claim` | JWT claim to use as the user’s group. | "groups" | string | false |
-| `facility` | Packet facility to use for deploying the cluster. | - | string | false |
-| `project_id` | Packet project ID. | - | string | true |
-| `ssh_pubkeys` | List of SSH public keys for user `core`. Each element must be specified in a valid OpenSSH public key format, as defined in RFC 4253 Section 6.6, e.g. "ssh-rsa AAAAB3N...". | - | list(string) | true |
-| `os_arch` | Flatcar Container Linux architecture to install (amd64, arm64). | "amd64" | string | false |
-| `os_channel` | Flatcar Container Linux channel to install from (stable, beta, alpha, edge). | "stable" | string | false |
-| `os_version` | Flatcar Container Linux version to install. Version such as "2303.3.1" or "current". | "current" | string | false |
-| `ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
-| `management_cidrs` | List of IPv4 CIDRs authorized to access or manage the cluster. Example ["0.0.0.0/0"] to allow all. | - | list(string) | true |
-| `node_private_cidr` | Private IPv4 CIDR of the nodes used to allow inter-node traffic. Example "10.0.0.0/8" | - | string | true |
-| `enable_aggregation` | Enable the Kubernetes Aggregation Layer. | true | bool | false |
-| `enable_tls_bootstrap` | Enable TLS bootstraping for Kubelet. | true | bool | false |
-| `encrypt_pod_traffic` | Enable in-cluster pod traffic encryption. If true `network_mtu` is reduced by 60 to make room for the encryption header. | false | bool | false |
-| `ignore_x509_cn_check` | Ignore check of common name in x509 certificates. If any application is built pre golang 1.15 then API server rejects x509 from such application, enable this to get around apiserver. | false | bool | false |
-| `network_mtu` | Physical Network MTU. | 1500 | number | false |
-| `pod_cidr` | CIDR IPv4 range to assign Kubernetes pods. | "10.2.0.0/16" | string | false |
-| `service_cidr` | CIDR IPv4 range to assign Kubernetes services. | "10.3.0.0/16" | string | false |
-| `cluster_domain_suffix` | Cluster's DNS domain. | "cluster.local" | string | false |
-| `enable_reporting` | Enables usage or analytics reporting to upstream. | false | bool | false |
-| `reservation_ids` | Block with Packet hardware reservation IDs for controller nodes. Each key must have the format `controller-${index}` and the value is the reservation UUID. Can't be combined with `reservation_ids_default`. Example: `reservation_ids = { controller-0 = "" }`. | - | map(string) | false |
-| `reservation_ids_default` | Default reservation ID for controllers. The value`next-available` will choose any reservation that matches the pool's device type and facility. Can't be combined with `reservation_ids` | - | string | false |
-| `certs_validity_period_hours` | Validity of all the certificates in hours. | 8760 | number | false |
-| `worker_pool` | Configuration block for worker pools. There can be more than one. | - | list(object) | true |
-| `worker_pool.count` | Number of workers in the worker pool. Can be changed afterwards to add or delete workers. | 1 | number | true |
-| `worker_pool.clc_snippets` | Flatcar Container Linux Config snippets for nodes in the worker pool. | [] | list(string) | false |
-| `worker_pool.tags` | List of tags that will be propagated to nodes in the worker pool. | - | map(string) | false |
-| `worker_pool.disable_bgp` | Disable BGP on nodes. Nodes won't be able to connect to Packet BGP peers. | false | bool | false |
-| `worker_pool.ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
-| `worker_pool.os_arch` | Flatcar Container Linux architecture to install (amd64, arm64). | "amd64" | string | false |
-| `worker_pool.os_channel` | Flatcar Container Linux channel to install from (stable, beta, alpha, edge). | "stable" | string | false |
-| `worker_pool.os_version` | Flatcar Container Linux version to install. Version such as "2303.3.1" or "current". | "current" | string | false |
-| `worker_pool.node_type` | Packet instance type for worker nodes. | "c3.small.x86" | string | false |
-| `worker_pool.labels` | Map of extra Kubernetes Node labels for worker nodes. | - | map(string) | false |
-| `worker_pool.taints` | Map of Taints to assign to worker nodes. | - | map(string) | false |
-| `worker_pool.reservation_ids` | Block with Packet hardware reservation IDs for worker nodes. Each key must have the format `worker-${index}` and the value is the reservation UUID. Can't be combined with `reservation_ids_default`. Example: `reservation_ids = { worker-0 = "" }`. | - | map(string) | false |
-| `worker_pool.reservation_ids_default` | Default reservation ID for workers. The value`next-available` will choose any reservation that matches the pool's device type and facility. Can't be combined with `reservation_ids`. | - | string | false |
-| `worker_pool.setup_raid` | Attempt to create a RAID 0 from extra disks to be used for persistent container storage. Can't be used with `setup_raid_hdd` nor `setup_raid_sdd`. | false | bool | false |
-| `worker_pool.setup_raid_hdd` | Attempt to create a RAID 0 from extra Hard Disk drives only, to be used for persistent container storage. Can't be used with `setup_raid` nor `setup_raid_sdd`. | false | bool | false |
-| `worker_pool.setup_raid_ssd` | Attempt to create a RAID 0 from extra Solid State Drives only, to be used for persistent container storage. Can't be used with `setup_raid` nor `setup_raid_hdd`. | false | bool | false |
-| `worker_pool.setup_raid_ssd_fs` | When set to `true` file system will be created on SSD RAID device and will be mounted on `/mnt/node-local-ssd-storage`. To use the raw device set it to `false`. | false | bool | false |
-
+| Argument | Description | Default | Type | Required |
+|----------|-------------|:-------:|:----:|:--------:|
+| `auth_token` | Packet auth token. Use the `PACKET_AUTH_TOKEN` environment variable instead. | - | string | false |
+| `asset_dir` | Location where Lokomotive stores cluster assets. | - | string | true |
+| `cluster_name` | Name of the cluster. | - | string | true |
+| `tags` | List of tags that will be propagated to controller nodes. | - | map(string) | false |
+| `controller_count` | Number of controller nodes. | 1 | number | false |
+| `controller_type` | Packet instance type for controllers. Changing this field on an existing cluster is ignored; to apply a new value, the cluster must be destroyed and re-created. | "c3.small.x86" | string | false |
+| `controller_clc_snippets` | Controller Flatcar Container Linux Config snippets. | [] | list(string) | false |
+| `dns` | DNS configuration block. | - | object | true |
+| `dns.zone` | A DNS zone to use for the cluster. The following format is used for cluster-related DNS records: `<record>.<cluster_name>.<dns_zone>`. | - | string | true |
+| `dns.provider` | DNS provider to use for the cluster. Valid values: `cloudflare`, `route53`, `manual`. | - | string | true |
+| `oidc` | OIDC configuration block. | - | object | false |
+| `oidc.issuer_url` | URL of the provider which allows the API server to discover public signing keys. Only URLs using the `https://` scheme are accepted. | - | string | false |
+| `oidc.client_id` | A client ID that all tokens must be issued for. | "clusterauth" | string | false |
+| `oidc.username_claim` | JWT claim to use as the user name. | "email" | string | false |
+| `oidc.groups_claim` | JWT claim to use as the user’s group. | "groups" | string | false |
+| `facility` | Packet facility to use for deploying the cluster. | - | string | false |
+| `project_id` | Packet project ID. | - | string | true |
+| `ssh_pubkeys` | List of SSH public keys for user `core`. Each element must be specified in a valid OpenSSH public key format, as defined in RFC 4253 Section 6.6, e.g. "ssh-rsa AAAAB3N...". | - | list(string) | true |
+| `os_arch` | Flatcar Container Linux architecture to install (amd64, arm64). | "amd64" | string | false |
+| `os_channel` | Flatcar Container Linux channel to install from (stable, beta, alpha, edge). | "stable" | string | false |
+| `os_version` | Flatcar Container Linux version to install, e.g. "2303.3.1" or "current". | "current" | string | false |
+| `ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
+| `management_cidrs` | List of IPv4 CIDRs authorized to access or manage the cluster. Example: `["0.0.0.0/0"]` to allow all. | - | list(string) | true |
+| `node_private_cidr` | Private IPv4 CIDR of the nodes used to allow inter-node traffic. Example: "10.0.0.0/8". | - | string | true |
+| `enable_aggregation` | Enable the Kubernetes Aggregation Layer. | true | bool | false |
+| `enable_tls_bootstrap` | Enable TLS bootstrapping for Kubelet. | true | bool | false |
+| `encrypt_pod_traffic` | Enable in-cluster pod traffic encryption. If `true`, `network_mtu` is reduced by 60 to make room for the encryption header. | false | bool | false |
+| `ignore_x509_cn_check` | Ignore the Common Name check in x509 certificates. Certificates generated by applications built with Go versions older than 1.15 may rely on the Common Name instead of SANs and are rejected by the API server; enable this to work around that check. | false | bool | false |
+| `network_mtu` | Physical network MTU. | 1500 | number | false |
+| `pod_cidr` | IPv4 CIDR range to assign to Kubernetes pods. | "10.2.0.0/16" | string | false |
+| `service_cidr` | IPv4 CIDR range to assign to Kubernetes services. | "10.3.0.0/16" | string | false |
+| `cluster_domain_suffix` | Cluster's DNS domain. | "cluster.local" | string | false |
+| `enable_reporting` | Enable usage or analytics reporting to upstream. | false | bool | false |
+| `reservation_ids` | Block with Packet hardware reservation IDs for controller nodes. Each key must have the format `controller-${index}` and the value is the reservation UUID. Can't be combined with `reservation_ids_default`. Key indexes must be sequential and start from 0. Example: `reservation_ids = { controller-0 = "<reservation_uuid>" }`. | - | map(string) | false |
+| `reservation_ids_default` | Default reservation ID for controllers. The value `next-available` will choose any reservation that matches the pool's device type and facility. Can't be combined with `reservation_ids`. | - | string | false |
+| `certs_validity_period_hours` | Validity of all the certificates in hours. | 8760 | number | false |
+| `worker_pool` | Configuration block for worker pools. There can be more than one. | - | list(object) | true |
+| `worker_pool.count` | Number of workers in the worker pool. Can be changed afterwards to add or delete workers. | 1 | number | true |
+| `worker_pool.clc_snippets` | Flatcar Container Linux Config snippets for nodes in the worker pool. | [] | list(string) | false |
+| `worker_pool.tags` | List of tags that will be propagated to nodes in the worker pool. | - | map(string) | false |
+| `worker_pool.disable_bgp` | Disable BGP on nodes. Nodes won't be able to connect to Packet BGP peers. | false | bool | false |
+| `worker_pool.ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
+| `worker_pool.os_arch` | Flatcar Container Linux architecture to install (amd64, arm64). | "amd64" | string | false |
+| `worker_pool.os_channel` | Flatcar Container Linux channel to install from (stable, beta, alpha, edge). | "stable" | string | false |
+| `worker_pool.os_version` | Flatcar Container Linux version to install, e.g. "2303.3.1" or "current". | "current" | string | false |
+| `worker_pool.node_type` | Packet instance type for worker nodes. | "c3.small.x86" | string | false |
+| `worker_pool.labels` | Map of extra Kubernetes Node labels for worker nodes. | - | map(string) | false |
+| `worker_pool.taints` | Map of taints to assign to worker nodes. | - | map(string) | false |
+| `worker_pool.reservation_ids` | Block with Packet hardware reservation IDs for worker nodes. Each key must have the format `worker-${index}` and the value is the reservation UUID. Can't be combined with `reservation_ids_default`. Key indexes must be sequential and start from 0. Example: `reservation_ids = { worker-0 = "<reservation_uuid>" }`. | - | map(string) | false |
+| `worker_pool.reservation_ids_default` | Default reservation ID for workers. The value `next-available` will choose any reservation that matches the pool's device type and facility. Can't be combined with `reservation_ids`. | - | string | false |
+| `worker_pool.setup_raid` | Attempt to create a RAID 0 from extra disks to be used for persistent container storage. Can't be used with `setup_raid_hdd` nor `setup_raid_ssd`. | false | bool | false |
+| `worker_pool.setup_raid_hdd` | Attempt to create a RAID 0 from extra hard disk drives only, to be used for persistent container storage. Can't be used with `setup_raid` nor `setup_raid_ssd`. | false | bool | false |
+| `worker_pool.setup_raid_ssd` | Attempt to create a RAID 0 from extra solid state drives only, to be used for persistent container storage. Can't be used with `setup_raid` nor `setup_raid_hdd`. | false | bool | false |
+| `worker_pool.setup_raid_ssd_fs` | When set to `true`, a file system is created on the SSD RAID device and mounted at `/mnt/node-local-ssd-storage`. To use the raw device, set it to `false`. | false | bool | false |
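+
+For illustration, a worker pool that pins each node to a specific hardware
+reservation might look like the following sketch; the pool name, count and
+reservation UUIDs are placeholders:
+
+```hcl
+worker_pool "pool-1" {
+  count = 2
+
+  # One entry per node in the pool; key indexes are sequential and start from 0.
+  reservation_ids = {
+    worker-0 = "55a69fc8-0000-0000-0000-000000000000"
+    worker-1 = "23d6a0a5-0000-0000-0000-000000000000"
+  }
+}
+```
+
+Alternatively, set `reservation_ids_default = "next-available"` to let every
+node in the pool pick any matching reservation; the two options can't be
+combined.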
 
 ## Applying
 
diff --git a/pkg/platform/packet/packet.go b/pkg/platform/packet/packet.go
index 804370b32..9a93d743b 100644
--- a/pkg/platform/packet/packet.go
+++ b/pkg/platform/packet/packet.go
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"reflect"
 	"sort"
 	"strconv"
 	"strings"
@@ -491,11 +492,11 @@ func (c *config) checkWorkerPoolNamesUnique() hcl.Diagnostics {
 func (c *config) checkReservationIDs() hcl.Diagnostics {
 	var diagnostics hcl.Diagnostics
 
-	d := checkEachReservation(c.ReservationIDs, c.ReservationIDsDefault, c.ClusterName, controller)
+	d := checkEachReservation(c.ReservationIDs, c.ReservationIDsDefault, c.ClusterName, controller, c.ControllerCount)
 	diagnostics = append(diagnostics, d...)
 
 	for _, w := range c.WorkerPools {
-		d := checkEachReservation(w.ReservationIDs, w.ReservationIDsDefault, w.Name, worker)
+		d := checkEachReservation(w.ReservationIDs, w.ReservationIDsDefault, w.Name, worker, w.Count)
 		diagnostics = append(diagnostics, d...)
 	}
 
@@ -533,7 +534,9 @@ func (c *config) validateOSVersion() hcl.Diagnostics {
 // pool are not mixed between using "next-available" and specific UUIDs, as this
 // can't work reliably.
 // For more info, see comment when calling terraformCreateReservations().
-func checkEachReservation(reservationIDs map[string]string, resDefault, name string, role nodeRole) hcl.Diagnostics {
+//
+//nolint:funlen,lll
+func checkEachReservation(reservationIDs map[string]string, resDefault, name string, role nodeRole, count int) hcl.Diagnostics {
 	var diagnostics hcl.Diagnostics
 
 	errorPrefix := "Worker pool"
@@ -567,15 +570,21 @@ func checkEachReservation(reservationIDs map[string]string, resDefault, name str
 
 	// Check reservation_ids map doesn't use "next-available" as a value.
 	for _, v := range reservationIDs {
-		if v != "next-available" {
-			continue
+		if v == "next-available" {
+			diagnostics = append(diagnostics, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("%v reservation_ids entries can't use \"next-available\"", errorPrefix),
+				Detail:   fmt.Sprintf("%v: %q uses it, use specific UUIDs or reservation_ids_default only", errorPrefix, name),
+			})
 		}
 
-		diagnostics = append(diagnostics, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("%v reservations_ids entries can't use \"next-available\"", errorPrefix),
-			Detail:   fmt.Sprintf("%v: %q uses it, use specific UUIDs or reservations_ids_default only", errorPrefix, name),
-		})
+		if v == "" {
+			diagnostics = append(diagnostics, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("%v reservation_ids entries can't be empty", errorPrefix),
+				Detail:   fmt.Sprintf("%v: %q is empty", errorPrefix, name),
+			})
+		}
 	}
 
 	// Check format is:
@@ -589,6 +598,14 @@ func checkEachReservation(reservationIDs map[string]string, resDefault, name str
 	d := checkResFormat(reservationIDs, name, errorPrefix, resPrefix)
 	diagnostics = append(diagnostics, d...)
 
+	if len(reservationIDs) > 0 && len(reservationIDs) != count {
+		diagnostics = append(diagnostics, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("%v reservation_ids does not match count field", errorPrefix),
+			Detail:   fmt.Sprintf("%v: %q: all nodes in the pool must have a reservation ID set", errorPrefix, name),
+		})
+	}
+
 	return diagnostics
 }
 
@@ -597,7 +614,14 @@ func checkEachReservation(reservationIDs map[string]string, resDefault, name str
 func checkResFormat(reservationIDs map[string]string, name, errorPrefix, resPrefix string) hcl.Diagnostics {
 	var diagnostics hcl.Diagnostics
 
+	// Collect the numeric suffixes that appear (deviceIndexes) and the
+	// sequence 0..len-1 they must match (expectedIndexes). "expectedIndex"
+	// avoids shadowing the "index" string declared inside the loop.
+	deviceIndexes := []int{}
+	expectedIndexes := []int{}
+	expectedIndex := 0
+
 	for key := range reservationIDs {
+		expectedIndexes = append(expectedIndexes, expectedIndex)
+		expectedIndex++
+
 		hclErr := &hcl.Diagnostic{
 			Severity: hcl.DiagError,
 			Summary:  "Invalid reservation ID",
@@ -624,11 +648,24 @@ func checkResFormat(reservationIDs map[string]string, name, errorPrefix, resPref
 		// Check a valid number is used after "controller-" or
 		// "worker-".
 		index := resEntry[1]
-		if _, err := strconv.Atoi(index); err != nil {
-			diagnostics = append(diagnostics, hclErr)
-			// Don't duplicate the same error, show it one per key.
+ + i, err := strconv.Atoi(index) + if err == nil { + deviceIndexes = append(deviceIndexes, i) continue } + + diagnostics = append(diagnostics, hclErr) + } + + sort.Ints(deviceIndexes) + + if !reflect.DeepEqual(deviceIndexes, expectedIndexes) { + diagnostics = append(diagnostics, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reservation ID", + Detail: fmt.Sprintf("%v: reservation IDs must be sequential and start from 0", errorPrefix), + }) } return diagnostics diff --git a/pkg/platform/packet/packet_test.go b/pkg/platform/packet/packet_test.go index 92629eaee..3d15d99bc 100644 --- a/pkg/platform/packet/packet_test.go +++ b/pkg/platform/packet/packet_test.go @@ -15,7 +15,7 @@ package packet import ( - "sort" + "reflect" "testing" ) @@ -150,118 +150,101 @@ func TestValidateOSVersion(t *testing.T) { } } -func TestCheckResFormatPrefixValidInput(t *testing.T) { - r := map[string]string{"worker-2": "", "worker-3": ""} - - if d := checkResFormat(r, "", "", "worker"); d.HasErrors() { - t.Errorf("Validation failed and shouldn't for: %v", r) - } -} - -func TestCheckResFormatPrefixInvalidInput(t *testing.T) { - r := map[string]string{"broken-1": "", "worker-1-2": "", "worker-a": ""} - - if d := checkResFormat(r, "", "", "worker"); !d.HasErrors() { - t.Errorf("Should fail with res: %v", r) - } -} - -func TestCheckEachReservationValidInput(t *testing.T) { - cases := []struct { - role nodeRole - resDefault string - reservationIDs map[string]string +//nolint:funlen +func TestCheckValidConfig(t *testing.T) { + cases := map[string]struct { + mutateF func(*config) + expectError bool }{ - { - // Test validates worker config. - role: worker, - reservationIDs: map[string]string{ - "worker-2": "", - "worker-3": "", + "base_config_is_valid": { + mutateF: func(*config) {}, + }, + "reservation_IDs_for_controller_nodes_can't_have_random_prefix": { + mutateF: func(c *config) { + c.ReservationIDs = map[string]string{"foo-1": "bar"} }, + expectError: true, }, - { - // Test validates controller config. - role: controller, - reservationIDs: map[string]string{ - "controller-2": "", - "controller-3": "", + "reservation_IDs_for_controller_nodes_must_be_prefixed_with_'controller'": { + mutateF: func(c *config) { + c.ControllerCount = 1 + c.ReservationIDs = map[string]string{"controller-0": "bar"} }, }, - { - // Test works if resDefault is set and no - // reservationIDs. - role: controller, - resDefault: "next-available", + "reservation_IDs_for_worker_nodes_can't_have_random_prefix": { + mutateF: func(c *config) { + c.WorkerPools[0].ReservationIDs = map[string]string{"foo-0": "bar"} + }, + expectError: true, }, - } - - for _, tc := range cases { - if d := checkEachReservation(tc.reservationIDs, tc.resDefault, "", tc.role); d.HasErrors() { - t.Errorf("Should not fail with valid input: %v", tc) - } - } -} - -func TestCheckEachReservationInvalidInput(t *testing.T) { - cases := []struct { - role nodeRole - resDefault string - reservationIDs map[string]string - }{ - { - // Test if nodeRole is worker, reservation should be - // "worker-" not "controller". - role: worker, - reservationIDs: map[string]string{ - "controller-1": "", + "reservation_IDs_for_worker_nodes_must_be_prefixed_with_'worker'": { + mutateF: func(c *config) { + c.WorkerPools[0].Count = 1 + c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "bar"} }, }, - { - // Idem previous but vice-versa. 
-			role:       controller,
-			reservationIDs: map[string]string{
-				"worker-3": "",
+		"reservation_IDs_for_worker_nodes_can't_be_mixed_with_default_reservation_ID": {
+			mutateF: func(c *config) {
+				c.WorkerPools[0].ReservationIDsDefault = "next-available"
+				c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "bar"}
 			},
+			expectError: true,
 		},
-		{
-			// Test if resDefault is set to next-available,
-			// reservationIDs should be empty.
-			role:       worker,
-			resDefault: "next-available",
-			reservationIDs: map[string]string{
-				"worker-3": "",
+		"reservation_IDs_for_worker_nodes_can't_be_set_to_'next-available'": {
+			mutateF: func(c *config) {
+				c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "next-available"}
 			},
+			expectError: true,
 		},
-		{
-			// Test reservationIDs should never be set to
-			// "next-available".
-			role: worker,
-			reservationIDs: map[string]string{
-				"worker-3": "next-available",
+		"reservation_IDs_can't_be_empty": {
+			mutateF: func(c *config) {
+				c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": ""}
+			},
+			expectError: true,
+		},
+		"reservation_IDs_must_be_sequential": {
+			mutateF: func(c *config) {
+				c.WorkerPools[0].Count = 2
+				c.WorkerPools[0].ReservationIDs = map[string]string{
+					"worker-0": "foo",
+					"worker-1": "bar",
+				}
+			},
+		},
+		"not_sequential_reservation_IDs_are_invalid": {
+			mutateF: func(c *config) {
+				c.WorkerPools[0].ReservationIDs = map[string]string{
+					"worker-0": "foo",
+					"worker-2": "bar",
+				}
 			},
+			expectError: true,
 		},
 	}
 
-	for _, tc := range cases {
-		if d := checkEachReservation(tc.reservationIDs, tc.resDefault, "", tc.role); !d.HasErrors() {
-			t.Errorf("No error detected in invalid input: %v", tc)
-		}
-	}
-}
+	for name, c := range cases {
+		c := c
 
-//nolint: funlen
-func TestTerraformAddDeps(t *testing.T) {
-	type testCase struct {
-		// Config to test
-		cfg config
-		// Expected config after running the test
-		exp config
-	}
+		t.Run(name, func(t *testing.T) {
+			config := baseConfig()
+
+			c.mutateF(config)
+
+			diagnostics := config.checkValidConfig()
 
-	var cases []testCase
+			if diagnostics.HasErrors() && !c.expectError {
+				t.Fatalf("unexpected validation error: %v", diagnostics)
+			}
+
+			if !diagnostics.HasErrors() && c.expectError {
+				t.Fatalf("expected error, but validation passed")
+			}
+		})
+	}
+}
 
-	base := config{
+func baseConfig() *config {
+	return &config{
 		ClusterName: "c",
 		WorkerPools: []workerPool{
 			{
@@ -272,86 +255,91 @@
 			},
 		},
 	}
+}
 
-	// Test WorkerPool w/o res depends on 1 WorkerPool with res
-	test := base
-	test.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"}
-	exp := test
-	exp.WorkerPools[1].NodesDependOn = []string{poolTarget("1", "worker_nodes_ids")}
-
-	cases = append(cases, testCase{test, exp})
-
-	// Test 2 WorkerPools w/o res depend on 2 WorkerPool with rest
-	test = base
-	test.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"}
-	test.WorkerPools = append(test.WorkerPools, workerPool{Name: "3"})
-	exp = test
-	exp.WorkerPools[1].NodesDependOn = []string{poolTarget("1", "worker_nodes_ids")}
-
-	cases = append(cases, testCase{test, exp})
-
-	// Test 1 WorkerPools w/o res depend on 2 WorkerPool with res
-	test = base
-	test.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"}
-	test.WorkerPools[1].ReservationIDs = map[string]string{"worker-0": "dummy"}
-	test.WorkerPools = append(test.WorkerPools, workerPool{Name: "3"})
-	exp = test
-	exp.WorkerPools[2].NodesDependOn = []string{
-		poolTarget("1", "worker_nodes_ids"),
-		poolTarget("2", "worker_nodes_ids"),
-	}
poolTarget("2", "worker_nodes_ids"), +//nolint: funlen +func TestTerraformAddDeps(t *testing.T) { + cases := map[string]struct { + configF func(*config) + expectedConfigF func(*config) + }{ + "worker pool without reservation IDs depends on worker pool with reservation ID": { + func(c *config) { + c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"} + }, + func(c *config) { + c.WorkerPools[1].NodesDependOn = []string{poolTarget("1", "worker_nodes_ids")} + }, + }, + "all worker pools without reservation IDs depends on worker pool with reservation ID": { + func(c *config) { + c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"} + c.WorkerPools = append(c.WorkerPools, workerPool{Name: "3"}) + }, + func(c *config) { + c.WorkerPools[1].NodesDependOn = []string{poolTarget("1", "worker_nodes_ids")} + }, + }, + "worker pool without reservation IDs depends on all worker pools with reservation IDs": { + func(c *config) { + c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"} + c.WorkerPools[1].ReservationIDs = map[string]string{"worker-0": "dummy"} + c.WorkerPools = append(c.WorkerPools, workerPool{Name: "3"}) + }, + func(c *config) { + c.WorkerPools[2].NodesDependOn = []string{ + poolTarget("1", "worker_nodes_ids"), + poolTarget("2", "worker_nodes_ids"), + } + }, + }, + "worker pools without reservation IDs depends on controller nodes with reservation IDs": { + func(c *config) { + c.ReservationIDs = map[string]string{"controller-0": "dummy"} + }, + func(c *config) { + c.WorkerPools[0].NodesDependOn = []string{clusterTarget("1", "device_ids")} + c.WorkerPools[1].NodesDependOn = []string{clusterTarget("1", "device_ids")} + }, + }, + "worker pool without reservation IDs depends on controller nodes and worker pools with reservation IDs": { + func(c *config) { + c.ReservationIDs = map[string]string{"controller-0": "dummy"} + c.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"} + }, + func(c *config) { + c.WorkerPools[1].NodesDependOn = []string{clusterTarget("1", "device_ids"), poolTarget("1", "device_ids")} + }, + }, } - cases = append(cases, testCase{test, exp}) + for name, c := range cases { + c := c - // Test 2 WorkerPools w/o res depend on controllers - test = base - test.ReservationIDs = map[string]string{"controller-0": "dummy"} - exp = test - exp.WorkerPools[0].NodesDependOn = []string{clusterTarget("1", "device_ids")} - exp.WorkerPools[1].NodesDependOn = []string{clusterTarget("1", "device_ids")} + t.Run(name, func(t *testing.T) { + // Create copy of base config. + config := baseConfig() - cases = append(cases, testCase{test, exp}) + // Mutate it. + c.configF(config) - // Test 1 WorkerPools w/o res depends on controllers and WorkerPool with - // res - test = base - test.ReservationIDs = map[string]string{"controller-0": "dummy"} - test.WorkerPools[0].ReservationIDs = map[string]string{"worker-0": "dummy"} - exp = test - exp.WorkerPools[1].NodesDependOn = []string{clusterTarget("1", "device_ids"), poolTarget("1", "device_ids")} + // Copy mutated config. + expectedConfig := config - cases = append(cases, testCase{test, exp}) - - for tcIdx, tc := range cases { - test := tc.cfg - exp := tc.exp + // Mutate to expected config. + c.expectedConfigF(expectedConfig) - test.terraformAddDeps() + // Add dependencies. 
+ config.terraformAddDeps() - for i, w := range test.WorkerPools { - ret := w.NodesDependOn - expRet := exp.WorkerPools[i].NodesDependOn + for i, workerPool := range config.WorkerPools { + dependencies := workerPool.NodesDependOn + expectedDependencies := expectedConfig.WorkerPools[i].NodesDependOn - if equal := cmpSliceString(ret, expRet); !equal { - t.Errorf("In test %v, expected %v, got %v", tcIdx, expRet, ret) + if !reflect.DeepEqual(dependencies, expectedDependencies) { + t.Fatalf("Expected %v, got %v", expectedDependencies, dependencies) + } } - } + }) } } - -func cmpSliceString(a, b []string) bool { - if len(a) != len(b) { - return false - } - - sort.Strings(a) - sort.Strings(b) - - for index, elem := range a { - if elem != b[index] { - return false - } - } - - return true -}