From 5d482d2dc59ad83a6d31f1d4adb29609da35e06b Mon Sep 17 00:00:00 2001
From: Andrew Starr-Bochicchio
Date: Fri, 25 Jun 2021 16:09:08 -0400
Subject: [PATCH 1/3] k8s: Make create timeout configurable (fixes: #329).

---
 ...esource_digitalocean_kubernetes_cluster.go | 30 +++++++++++------
 ...ce_digitalocean_kubernetes_cluster_test.go |  3 +-
 ...ource_digitalocean_kubernetes_node_pool.go | 33 ++++++++++++-------
 3 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/digitalocean/resource_digitalocean_kubernetes_cluster.go b/digitalocean/resource_digitalocean_kubernetes_cluster.go
index 6d38d0b5c..d308d624e 100644
--- a/digitalocean/resource_digitalocean_kubernetes_cluster.go
+++ b/digitalocean/resource_digitalocean_kubernetes_cluster.go
@@ -151,6 +151,10 @@ func resourceDigitalOceanKubernetesCluster() *schema.Resource {
             },
         },
 
+        Timeouts: &schema.ResourceTimeout{
+            Create: schema.DefaultTimeout(30 * time.Minute),
+        },
+
         CustomizeDiff: customdiff.All(
             customdiff.ForceNewIfChange("version", func(ctx context.Context, old, new, meta interface{}) bool {
                 // "version" can only be upgraded to newer versions, so we must create a new resource
@@ -270,15 +274,16 @@ func resourceDigitalOceanKubernetesClusterCreate(ctx context.Context, d *schema.
         return diag.Errorf("Error creating Kubernetes cluster: %s", err)
     }
 
+    // set the cluster id
+    d.SetId(cluster.ID)
+
     // wait for completion
-    cluster, err = waitForKubernetesClusterCreate(client, cluster.ID)
+    _, err = waitForKubernetesClusterCreate(client, d)
     if err != nil {
+        d.SetId("")
         return diag.Errorf("Error creating Kubernetes cluster: %s", err)
     }
 
-    // set the cluster id
-    d.SetId(cluster.ID)
-
     return resourceDigitalOceanKubernetesClusterRead(ctx, d, meta)
 }
@@ -419,7 +424,8 @@ func resourceDigitalOceanKubernetesClusterUpdate(ctx context.Context, d *schema.
         }
 
         // update the existing default pool
-        _, err := digitaloceanKubernetesNodePoolUpdate(client, newPool, d.Id(), oldPool["id"].(string), digitaloceanKubernetesDefaultNodePoolTag)
+        timeout := d.Timeout(schema.TimeoutCreate)
+        _, err := digitaloceanKubernetesNodePoolUpdate(client, timeout, newPool, d.Id(), oldPool["id"].(string), digitaloceanKubernetesDefaultNodePoolTag)
         if err != nil {
             return diag.FromErr(err)
         }
@@ -547,13 +553,17 @@ func resourceDigitalOceanKubernetesClusterImportState(d *schema.ResourceData, me
     return resourceDatas, nil
 }
 
-func waitForKubernetesClusterCreate(client *godo.Client, id string) (*godo.KubernetesCluster, error) {
-    ticker := time.NewTicker(10 * time.Second)
-    timeout := 120
-    n := 0
+func waitForKubernetesClusterCreate(client *godo.Client, d *schema.ResourceData) (*godo.KubernetesCluster, error) {
+    var (
+        tickerInterval = 10 * time.Second
+        timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
+        timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+        n              = 0
+    )
 
+    ticker := time.NewTicker(tickerInterval)
     for range ticker.C {
-        cluster, _, err := client.Kubernetes.Get(context.Background(), id)
+        cluster, _, err := client.Kubernetes.Get(context.Background(), d.Id())
         if err != nil {
             ticker.Stop()
             return nil, fmt.Errorf("Error trying to read cluster state: %s", err)
diff --git a/digitalocean/resource_digitalocean_kubernetes_cluster_test.go b/digitalocean/resource_digitalocean_kubernetes_cluster_test.go
index 27fc5e83d..071702c7d 100644
--- a/digitalocean/resource_digitalocean_kubernetes_cluster_test.go
+++ b/digitalocean/resource_digitalocean_kubernetes_cluster_test.go
@@ -112,7 +112,8 @@ func TestAccDigitalOceanKubernetesCluster_Basic(t *testing.T) {
                     resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "vpc_uuid"),
                     resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "auto_upgrade"),
                     resource.TestMatchResourceAttr("digitalocean_kubernetes_cluster.foobar", "urn", expectedURNRegEx),
-                    resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy"),
+                    resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy.0.day"),
+                    resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy.0.start_time"),
                 ),
             },
             // Update: remove default node_pool taints
diff --git a/digitalocean/resource_digitalocean_kubernetes_node_pool.go b/digitalocean/resource_digitalocean_kubernetes_node_pool.go
index c60f1b0e7..2e8909252 100644
--- a/digitalocean/resource_digitalocean_kubernetes_node_pool.go
+++ b/digitalocean/resource_digitalocean_kubernetes_node_pool.go
@@ -28,6 +28,10 @@ func resourceDigitalOceanKubernetesNodePool() *schema.Resource {
 
         SchemaVersion: 1,
         Schema:        nodePoolSchema(true),
+
+        Timeouts: &schema.ResourceTimeout{
+            Create: schema.DefaultTimeout(30 * time.Minute),
+        },
     }
 }
@@ -46,7 +50,8 @@ func resourceDigitalOceanKubernetesNodePoolCreate(ctx context.Context, d *schema.
         "taint":      d.Get("taint"),
     }
 
-    pool, err := digitaloceanKubernetesNodePoolCreate(client, rawPool, d.Get("cluster_id").(string))
+    timeout := d.Timeout(schema.TimeoutCreate)
+    pool, err := digitaloceanKubernetesNodePoolCreate(client, timeout, rawPool, d.Get("cluster_id").(string))
     if err != nil {
         return diag.Errorf("Error creating Kubernetes node pool: %s", err)
     }
@@ -110,7 +115,8 @@ func resourceDigitalOceanKubernetesNodePoolUpdate(ctx context.Context, d *schema.
     _, newTaint := d.GetChange("taint")
     rawPool["taint"] = newTaint
 
-    _, err := digitaloceanKubernetesNodePoolUpdate(client, rawPool, d.Get("cluster_id").(string), d.Id())
+    timeout := d.Timeout(schema.TimeoutCreate)
+    _, err := digitaloceanKubernetesNodePoolUpdate(client, timeout, rawPool, d.Get("cluster_id").(string), d.Id())
     if err != nil {
         return diag.Errorf("Error updating node pool: %s", err)
     }
@@ -186,7 +192,7 @@ func resourceDigitalOceanKubernetesNodePoolImportState(d *schema.ResourceData, m
     return []*schema.ResourceData{d}, nil
 }
 
-func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]interface{}, clusterID string, customTags ...string) (*godo.KubernetesNodePool, error) {
+func digitaloceanKubernetesNodePoolCreate(client *godo.Client, timeout time.Duration, pool map[string]interface{}, clusterID string, customTags ...string) (*godo.KubernetesNodePool, error) {
     // append any custom tags
     tags := expandTags(pool["tags"].(*schema.Set).List())
     tags = append(tags, customTags...)
@@ -209,7 +215,7 @@ func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]i
         return nil, fmt.Errorf("Unable to create new default node pool %s", err)
     }
 
-    err = waitForKubernetesNodePoolCreate(client, clusterID, p.ID)
+    err = waitForKubernetesNodePoolCreate(client, timeout, clusterID, p.ID)
     if err != nil {
         return nil, err
     }
@@ -217,7 +223,7 @@ func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]i
     return p, nil
 }
 
-func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]interface{}, clusterID, poolID string, customTags ...string) (*godo.KubernetesNodePool, error) {
+func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, timeout time.Duration, pool map[string]interface{}, clusterID, poolID string, customTags ...string) (*godo.KubernetesNodePool, error) {
     tags := expandTags(pool["tags"].(*schema.Set).List())
     tags = append(tags, customTags...)
@@ -262,7 +268,7 @@ func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]i
         return nil, fmt.Errorf("Unable to update nodepool: %s", err)
     }
 
-    err = waitForKubernetesNodePoolCreate(client, clusterID, p.ID)
+    err = waitForKubernetesNodePoolCreate(client, timeout, clusterID, p.ID)
     if err != nil {
         return nil, err
     }
@@ -285,12 +291,15 @@ func digitaloceanKubernetesNodePoolDelete(client *godo.Client, clusterID, poolID
     return nil
 }
 
-func waitForKubernetesNodePoolCreate(client *godo.Client, id string, poolID string) error {
-    tickerInterval := 10 //10s
-    timeout := 1800      //1800s, 30min
-    n := 0
+func waitForKubernetesNodePoolCreate(client *godo.Client, duration time.Duration, id string, poolID string) error {
+    var (
+        tickerInterval = 10 * time.Second
+        timeoutSeconds = duration.Seconds()
+        timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+        n              = 0
+    )
 
-    ticker := time.NewTicker(time.Duration(tickerInterval) * time.Second)
+    ticker := time.NewTicker(tickerInterval)
     for range ticker.C {
         pool, _, err := client.Kubernetes.GetNodePool(context.Background(), id, poolID)
         if err != nil {
@@ -310,7 +319,7 @@ func waitForKubernetesNodePoolCreate(client *godo.Client, id string, poolID stri
             return nil
         }
 
-        if n*tickerInterval > timeout {
+        if n > timeout {
             ticker.Stop()
             break
         }
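
Every hunk in this first patch follows the same pattern: the resource declares a Timeouts block with a 30-minute default, the create and update paths read it with d.Timeout(schema.TimeoutCreate) and pass it down, and the waiters turn the duration into an iteration budget by dividing it by the ticker interval (30 minutes at a 10-second interval gives 180 ticks, the same budget the old hard-coded timeout := 1800 expressed). The sketch below shows that polling pattern in isolation. It is illustrative only: waitUntilReady and its callback are hypothetical names, and the provider's real waiters poll the DigitalOcean API through godo rather than a callback.

package main

import (
    "fmt"
    "time"
)

// waitUntilReady polls check every interval until it reports true or the
// timeout elapses. The iteration budget is derived the same way the patch
// does it: timeout divided by the ticker interval.
func waitUntilReady(timeout, interval time.Duration, check func() (bool, error)) error {
    maxTicks := int(timeout.Seconds() / interval.Seconds())
    n := 0

    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for range ticker.C {
        ready, err := check()
        if err != nil {
            return fmt.Errorf("error reading resource state: %s", err)
        }
        if ready {
            return nil
        }

        n++
        if n > maxTicks {
            break
        }
    }

    return fmt.Errorf("timeout waiting for resource to become ready")
}

func main() {
    start := time.Now()

    // Pretend the resource becomes ready after three seconds.
    err := waitUntilReady(10*time.Second, time.Second, func() (bool, error) {
        return time.Since(start) > 3*time.Second, nil
    })
    fmt.Println("result:", err)
}

Deriving the budget from d.Timeout(...) is what makes the limit configurable: the default stays at 30 minutes, but a practitioner-supplied timeouts block changes the number of ticks without touching the waiter itself.
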
From 6485298a8e14d224fd6c2f42a3bc581f8dc9176d Mon Sep 17 00:00:00 2001
From: Andrew Starr-Bochicchio
Date: Fri, 25 Jun 2021 16:23:50 -0400
Subject: [PATCH 2/3] dbaas: Make create timeout configurable.

---
 .../resource_digitalocean_database_cluster.go | 31 ++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/digitalocean/resource_digitalocean_database_cluster.go b/digitalocean/resource_digitalocean_database_cluster.go
index 69010dd29..af6ab1bda 100644
--- a/digitalocean/resource_digitalocean_database_cluster.go
+++ b/digitalocean/resource_digitalocean_database_cluster.go
@@ -171,6 +171,10 @@ func resourceDigitalOceanDatabaseCluster() *schema.Resource {
             "tags": tagsSchema(),
         },
 
+        Timeouts: &schema.ResourceTimeout{
+            Create: schema.DefaultTimeout(30 * time.Minute),
+        },
+
         CustomizeDiff: customdiff.All(
             transitionVersionToRequired(),
             validateExclusiveAttributes(),
@@ -246,14 +250,15 @@ func resourceDigitalOceanDatabaseClusterCreate(ctx context.Context, d *schema.Re
         }
     }
 
-    database, err = waitForDatabaseCluster(client, database.ID, "online")
+    d.SetId(database.ID)
+    log.Printf("[INFO] database cluster Name: %s", database.Name)
+
+    database, err = waitForDatabaseCluster(client, d, "online")
     if err != nil {
+        d.SetId("")
         return diag.Errorf("Error creating database cluster: %s", err)
     }
 
-    d.SetId(database.ID)
-    log.Printf("[INFO] database cluster Name: %s", database.Name)
-
     if v, ok := d.GetOk("maintenance_window"); ok {
         opts := expandMaintWindowOpts(v.([]interface{}))
@@ -308,7 +313,7 @@ func resourceDigitalOceanDatabaseClusterUpdate(ctx context.Context, d *schema.Re
             return diag.Errorf("Error resizing database cluster: %s", err)
         }
 
-        _, err = waitForDatabaseCluster(client, d.Id(), "online")
+        _, err = waitForDatabaseCluster(client, d, "online")
         if err != nil {
             return diag.Errorf("Error resizing database cluster: %s", err)
         }
@@ -331,7 +336,7 @@ func resourceDigitalOceanDatabaseClusterUpdate(ctx context.Context, d *schema.Re
             return diag.Errorf("Error migrating database cluster: %s", err)
         }
 
-        _, err = waitForDatabaseCluster(client, d.Id(), "online")
+        _, err = waitForDatabaseCluster(client, d, "online")
         if err != nil {
             return diag.Errorf("Error migrating database cluster: %s", err)
         }
@@ -456,13 +461,17 @@ func resourceDigitalOceanDatabaseClusterDelete(ctx context.Context, d *schema.Re
     return nil
 }
 
-func waitForDatabaseCluster(client *godo.Client, id string, status string) (*godo.Database, error) {
-    ticker := time.NewTicker(15 * time.Second)
-    timeout := 120
-    n := 0
+func waitForDatabaseCluster(client *godo.Client, d *schema.ResourceData, status string) (*godo.Database, error) {
+    var (
+        tickerInterval = 15 * time.Second
+        timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
+        timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+        n              = 0
+    )
 
+    ticker := time.NewTicker(tickerInterval)
     for range ticker.C {
-        database, _, err := client.Databases.Get(context.Background(), id)
+        database, _, err := client.Databases.Get(context.Background(), d.Id())
         if err != nil {
             ticker.Stop()
             return nil, fmt.Errorf("Error trying to read database cluster state: %s", err)
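
The user-facing half of both patches is the Timeouts declaration itself: it is what exposes a timeouts block on the resource, and d.Timeout(schema.TimeoutCreate) is what resolves a practitioner override against the declared default. The stub below sketches just that wiring as a minimal terraform-plugin-sdk/v2 resource (the SDK version is an assumption based on the CreateContext and diag.Diagnostics signatures in these files); resourceExample and its helpers are hypothetical names, not provider code.

package example

import (
    "context"
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// resourceExample declares a create and a delete timeout with 30-minute
// defaults, mirroring the blocks added in these patches.
func resourceExample() *schema.Resource {
    return &schema.Resource{
        CreateContext: resourceExampleCreate,
        ReadContext:   resourceExampleNoop,
        DeleteContext: resourceExampleNoop,

        Schema: map[string]*schema.Schema{
            "name": {
                Type:     schema.TypeString,
                Required: true,
                ForceNew: true,
            },
        },

        Timeouts: &schema.ResourceTimeout{
            Create: schema.DefaultTimeout(30 * time.Minute),
            Delete: schema.DefaultTimeout(30 * time.Minute),
        },
    }
}

func resourceExampleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
    // Either the value from the practitioner's timeouts block or the
    // 30-minute default declared above.
    timeout := d.Timeout(schema.TimeoutCreate)

    // ... issue the API call, then poll for completion for at most
    // `timeout`, as the waiters in these patches do ...
    _ = timeout

    d.SetId(d.Get("name").(string))
    return nil
}

func resourceExampleNoop(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
    return nil
}
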
From 3986905676bf15f9e8890735576eacdb6341407c Mon Sep 17 00:00:00 2001
From: Andrew Starr-Bochicchio
Date: Fri, 25 Jun 2021 16:28:07 -0400
Subject: [PATCH 3/3] k8s: Make delete timeout configurable as well.

---
 .../resource_digitalocean_database_cluster.go |  2 +-
 ...esource_digitalocean_kubernetes_cluster.go |  4 +-
 ...ource_digitalocean_kubernetes_node_pool.go | 44 +++++++++----------
 3 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/digitalocean/resource_digitalocean_database_cluster.go b/digitalocean/resource_digitalocean_database_cluster.go
index af6ab1bda..3d09d9d1c 100644
--- a/digitalocean/resource_digitalocean_database_cluster.go
+++ b/digitalocean/resource_digitalocean_database_cluster.go
@@ -467,9 +467,9 @@ func waitForDatabaseCluster(client *godo.Client, d *schema.ResourceData, status
         timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
         timeout        = int(timeoutSeconds / tickerInterval.Seconds())
         n              = 0
+        ticker         = time.NewTicker(tickerInterval)
     )
 
-    ticker := time.NewTicker(tickerInterval)
     for range ticker.C {
         database, _, err := client.Databases.Get(context.Background(), d.Id())
         if err != nil {
diff --git a/digitalocean/resource_digitalocean_kubernetes_cluster.go b/digitalocean/resource_digitalocean_kubernetes_cluster.go
index d308d624e..123eee069 100644
--- a/digitalocean/resource_digitalocean_kubernetes_cluster.go
+++ b/digitalocean/resource_digitalocean_kubernetes_cluster.go
@@ -556,12 +556,12 @@ func resourceDigitalOceanKubernetesClusterImportState(d *schema.ResourceData, me
 func waitForKubernetesClusterCreate(client *godo.Client, d *schema.ResourceData) (*godo.KubernetesCluster, error) {
     var (
         tickerInterval = 10 * time.Second
-        timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
+        timeoutSeconds = d.Timeout(schema.TimeoutCreate).Seconds()
         timeout        = int(timeoutSeconds / tickerInterval.Seconds())
         n              = 0
+        ticker         = time.NewTicker(tickerInterval)
     )
 
-    ticker := time.NewTicker(tickerInterval)
     for range ticker.C {
         cluster, _, err := client.Kubernetes.Get(context.Background(), d.Id())
         if err != nil {
diff --git a/digitalocean/resource_digitalocean_kubernetes_node_pool.go b/digitalocean/resource_digitalocean_kubernetes_node_pool.go
index 2e8909252..61a4c985d 100644
--- a/digitalocean/resource_digitalocean_kubernetes_node_pool.go
+++ b/digitalocean/resource_digitalocean_kubernetes_node_pool.go
@@ -31,6 +31,7 @@ func resourceDigitalOceanKubernetesNodePool() *schema.Resource {
 
         Timeouts: &schema.ResourceTimeout{
             Create: schema.DefaultTimeout(30 * time.Minute),
+            Delete: schema.DefaultTimeout(30 * time.Minute),
         },
     }
 }
@@ -126,8 +127,17 @@ func resourceDigitalOceanKubernetesNodePoolUpdate(ctx context.Context, d *schema
 func resourceDigitalOceanKubernetesNodePoolDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
     client := meta.(*CombinedConfig).godoClient()
 
+    _, err := client.Kubernetes.DeleteNodePool(context.Background(), d.Get("cluster_id").(string), d.Id())
+    if err != nil {
+        return diag.Errorf("Unable to delete node pool %s", err)
+    }
+
+    err = waitForKubernetesNodePoolDelete(client, d)
+    if err != nil {
+        return diag.FromErr(err)
+    }
-    return digitaloceanKubernetesNodePoolDelete(client, d.Get("cluster_id").(string), d.Id())
+    return nil
 }
@@ -276,21 +286,6 @@ func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]i
     return p, nil
 }
 
-func digitaloceanKubernetesNodePoolDelete(client *godo.Client, clusterID, poolID string) diag.Diagnostics {
-    // delete the old pool
-    _, err := client.Kubernetes.DeleteNodePool(context.Background(), clusterID, poolID)
-    if err != nil {
diag.Errorf("Unable to delete node pool %s", err) - } - - err = waitForKubernetesNodePoolDelete(client, clusterID, poolID) - if err != nil { - return diag.FromErr(err) - } - - return nil -} - func waitForKubernetesNodePoolCreate(client *godo.Client, duration time.Duration, id string, poolID string) error { var ( tickerInterval = 10 * time.Second @@ -330,14 +325,17 @@ func waitForKubernetesNodePoolCreate(client *godo.Client, duration time.Duration return fmt.Errorf("Timeout waiting to create nodepool") } -func waitForKubernetesNodePoolDelete(client *godo.Client, id string, poolID string) error { - tickerInterval := 10 //10s - timeout := 1800 //1800s, 30min - n := 0 +func waitForKubernetesNodePoolDelete(client *godo.Client, d *schema.ResourceData) error { + var ( + tickerInterval = 10 * time.Second + timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds() + timeout = int(timeoutSeconds / tickerInterval.Seconds()) + n = 0 + ticker = time.NewTicker(tickerInterval) + ) - ticker := time.NewTicker(time.Duration(tickerInterval) * time.Second) for range ticker.C { - _, resp, err := client.Kubernetes.GetNodePool(context.Background(), id, poolID) + _, resp, err := client.Kubernetes.GetNodePool(context.Background(), d.Get("cluster_id").(string), d.Id()) if err != nil { ticker.Stop() @@ -348,7 +346,7 @@ func waitForKubernetesNodePoolDelete(client *godo.Client, id string, poolID stri return fmt.Errorf("Error trying to read nodepool state: %s", err) } - if n*tickerInterval > timeout { + if n > timeout { ticker.Stop() break }