Make create timeouts configurable for DBaaS and K8s clusters. #650

Merged · 3 commits · Jun 28, 2021
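With the schema.ResourceTimeout entries added in this PR, the standard Terraform timeouts block becomes available on the affected resources in place of the previously hardcoded waits. A minimal sketch of the user-facing configuration this enables (the resource arguments are illustrative, not taken from this PR):

resource "digitalocean_database_cluster" "example" {
  name       = "example-cluster"
  engine     = "pg"
  version    = "13"
  size       = "db-s-1vcpu-1gb"
  region     = "nyc1"
  node_count = 1

  # Overrides the 30-minute create default registered in the diff below.
  timeouts {
    create = "60m"
  }
}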
digitalocean/resource_digitalocean_database_cluster.go (31 changes: 20 additions & 11 deletions)
@@ -171,6 +171,10 @@ func resourceDigitalOceanDatabaseCluster() *schema.Resource {
			"tags": tagsSchema(),
		},

+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(30 * time.Minute),
+		},
+
		CustomizeDiff: customdiff.All(
			transitionVersionToRequired(),
			validateExclusiveAttributes(),
@@ -246,14 +250,15 @@ func resourceDigitalOceanDatabaseClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		}
	}

-	database, err = waitForDatabaseCluster(client, database.ID, "online")
+	d.SetId(database.ID)
+	log.Printf("[INFO] database cluster Name: %s", database.Name)
+
+	database, err = waitForDatabaseCluster(client, d, "online")
	if err != nil {
		d.SetId("")
		return diag.Errorf("Error creating database cluster: %s", err)
	}

-	d.SetId(database.ID)
-	log.Printf("[INFO] database cluster Name: %s", database.Name)
-
	if v, ok := d.GetOk("maintenance_window"); ok {
		opts := expandMaintWindowOpts(v.([]interface{}))
@@ -308,7 +313,7 @@ func resourceDigitalOceanDatabaseClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		return diag.Errorf("Error resizing database cluster: %s", err)
	}

-	_, err = waitForDatabaseCluster(client, d.Id(), "online")
+	_, err = waitForDatabaseCluster(client, d, "online")
	if err != nil {
		return diag.Errorf("Error resizing database cluster: %s", err)
	}
@@ -331,7 +336,7 @@ func resourceDigitalOceanDatabaseClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		return diag.Errorf("Error migrating database cluster: %s", err)
	}

-	_, err = waitForDatabaseCluster(client, d.Id(), "online")
+	_, err = waitForDatabaseCluster(client, d, "online")
	if err != nil {
		return diag.Errorf("Error migrating database cluster: %s", err)
	}
@@ -456,13 +461,17 @@ func resourceDigitalOceanDatabaseClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	return nil
}

-func waitForDatabaseCluster(client *godo.Client, id string, status string) (*godo.Database, error) {
-	ticker := time.NewTicker(15 * time.Second)
-	timeout := 120
-	n := 0
+func waitForDatabaseCluster(client *godo.Client, d *schema.ResourceData, status string) (*godo.Database, error) {
+	var (
+		tickerInterval = 15 * time.Second
+		timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
+		timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+		n              = 0
+		ticker         = time.NewTicker(tickerInterval)
+	)

	for range ticker.C {
-		database, _, err := client.Databases.Get(context.Background(), id)
+		database, _, err := client.Databases.Get(context.Background(), d.Id())
		if err != nil {
			ticker.Stop()
			return nil, fmt.Errorf("Error trying to read database cluster state: %s", err)
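With the 15-second poll interval used here, the default 30-minute budget divides into the same count the removed code hardcoded: 1800 s / 15 s = 120 iterations, matching the old timeout := 120. A user-supplied timeout now simply scales the iteration count instead.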
digitalocean/resource_digitalocean_kubernetes_cluster.go (30 changes: 20 additions & 10 deletions)
@@ -151,6 +151,10 @@ func resourceDigitalOceanKubernetesCluster() *schema.Resource {
			},
		},

+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(30 * time.Minute),
+		},
+
		CustomizeDiff: customdiff.All(
			customdiff.ForceNewIfChange("version", func(ctx context.Context, old, new, meta interface{}) bool {
				// "version" can only be upgraded to newer versions, so we must create a new resource
@@ -270,15 +274,16 @@ func resourceDigitalOceanKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		return diag.Errorf("Error creating Kubernetes cluster: %s", err)
	}

+	// set the cluster id
+	d.SetId(cluster.ID)
+
	// wait for completion
-	cluster, err = waitForKubernetesClusterCreate(client, cluster.ID)
+	_, err = waitForKubernetesClusterCreate(client, d)
	if err != nil {
+		d.SetId("")
		return diag.Errorf("Error creating Kubernetes cluster: %s", err)
	}

-	// set the cluster id
-	d.SetId(cluster.ID)
-
	return resourceDigitalOceanKubernetesClusterRead(ctx, d, meta)
}

@@ -419,7 +424,8 @@ func resourceDigitalOceanKubernetesClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	}

	// update the existing default pool
-	_, err := digitaloceanKubernetesNodePoolUpdate(client, newPool, d.Id(), oldPool["id"].(string), digitaloceanKubernetesDefaultNodePoolTag)
+	timeout := d.Timeout(schema.TimeoutCreate)
+	_, err := digitaloceanKubernetesNodePoolUpdate(client, timeout, newPool, d.Id(), oldPool["id"].(string), digitaloceanKubernetesDefaultNodePoolTag)
	if err != nil {
		return diag.FromErr(err)
	}
@@ -547,13 +553,17 @@ func resourceDigitalOceanKubernetesClusterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	return resourceDatas, nil
}

-func waitForKubernetesClusterCreate(client *godo.Client, id string) (*godo.KubernetesCluster, error) {
-	ticker := time.NewTicker(10 * time.Second)
-	timeout := 120
-	n := 0
+func waitForKubernetesClusterCreate(client *godo.Client, d *schema.ResourceData) (*godo.KubernetesCluster, error) {
+	var (
+		tickerInterval = 10 * time.Second
+		timeoutSeconds = d.Timeout(schema.TimeoutCreate).Seconds()
+		timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+		n              = 0
+		ticker         = time.NewTicker(tickerInterval)
+	)

	for range ticker.C {
-		cluster, _, err := client.Kubernetes.Get(context.Background(), id)
+		cluster, _, err := client.Kubernetes.Get(context.Background(), d.Id())
		if err != nil {
			ticker.Stop()
			return nil, fmt.Errorf("Error trying to read cluster state: %s", err)
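For the cluster wait this also lengthens the effective default: assuming the loop's unchanged exit check is if n > timeout (the node pool helpers below needed their n*tickerInterval > timeout checks rewritten, but no such change appears in this hunk), the old budget was 120 polls at 10-second intervals, roughly 20 minutes, while the new default allows 30. The next hunk is from the cluster's acceptance tests, tightening the maintenance_policy check to its individual attributes.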
@@ -112,7 +112,8 @@ func TestAccDigitalOceanKubernetesCluster_Basic(t *testing.T) {
					resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "vpc_uuid"),
					resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "auto_upgrade"),
					resource.TestMatchResourceAttr("digitalocean_kubernetes_cluster.foobar", "urn", expectedURNRegEx),
-					resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy"),
+					resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy.0.day"),
+					resource.TestCheckResourceAttrSet("digitalocean_kubernetes_cluster.foobar", "maintenance_policy.0.start_time"),
				),
			},
			// Update: remove default node_pool taints
digitalocean/resource_digitalocean_kubernetes_node_pool.go (77 changes: 42 additions & 35 deletions)
@@ -28,6 +28,11 @@ func resourceDigitalOceanKubernetesNodePool() *schema.Resource {
		SchemaVersion: 1,

		Schema: nodePoolSchema(true),
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(30 * time.Minute),
+			Delete: schema.DefaultTimeout(30 * time.Minute),
+		},
	}
}

@@ -46,7 +51,8 @@ func resourceDigitalOceanKubernetesNodePoolCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		"taint": d.Get("taint"),
	}

-	pool, err := digitaloceanKubernetesNodePoolCreate(client, rawPool, d.Get("cluster_id").(string))
+	timeout := d.Timeout(schema.TimeoutCreate)
+	pool, err := digitaloceanKubernetesNodePoolCreate(client, timeout, rawPool, d.Get("cluster_id").(string))
	if err != nil {
		return diag.Errorf("Error creating Kubernetes node pool: %s", err)
	}
@@ -110,7 +116,8 @@ func resourceDigitalOceanKubernetesNodePoolUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		_, newTaint := d.GetChange("taint")
		rawPool["taint"] = newTaint

-	_, err := digitaloceanKubernetesNodePoolUpdate(client, rawPool, d.Get("cluster_id").(string), d.Id())
+	timeout := d.Timeout(schema.TimeoutCreate)
+	_, err := digitaloceanKubernetesNodePoolUpdate(client, timeout, rawPool, d.Get("cluster_id").(string), d.Id())
	if err != nil {
		return diag.Errorf("Error updating node pool: %s", err)
	}
@@ -120,8 +127,17 @@ func resourceDigitalOceanKubernetesNodePoolUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {

func resourceDigitalOceanKubernetesNodePoolDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*CombinedConfig).godoClient()
+	_, err := client.Kubernetes.DeleteNodePool(context.Background(), d.Get("cluster_id").(string), d.Id())
+	if err != nil {
+		return diag.Errorf("Unable to delete node pool %s", err)
+	}

-	return digitaloceanKubernetesNodePoolDelete(client, d.Get("cluster_id").(string), d.Id())
+	err = waitForKubernetesNodePoolDelete(client, d)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
}

func resourceDigitalOceanKubernetesNodePoolImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
@@ -186,7 +202,7 @@ func resourceDigitalOceanKubernetesNodePoolImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	return []*schema.ResourceData{d}, nil
}

-func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]interface{}, clusterID string, customTags ...string) (*godo.KubernetesNodePool, error) {
+func digitaloceanKubernetesNodePoolCreate(client *godo.Client, timeout time.Duration, pool map[string]interface{}, clusterID string, customTags ...string) (*godo.KubernetesNodePool, error) {
	// append any custom tags
	tags := expandTags(pool["tags"].(*schema.Set).List())
	tags = append(tags, customTags...)
@@ -209,15 +225,15 @@ func digitaloceanKubernetesNodePoolCreate(client *godo.Client, pool map[string]interface{}, clusterID string, customTags ...string) (*godo.KubernetesNodePool, error) {
		return nil, fmt.Errorf("Unable to create new default node pool %s", err)
	}

-	err = waitForKubernetesNodePoolCreate(client, clusterID, p.ID)
+	err = waitForKubernetesNodePoolCreate(client, timeout, clusterID, p.ID)
	if err != nil {
		return nil, err
	}

	return p, nil
}

-func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]interface{}, clusterID, poolID string, customTags ...string) (*godo.KubernetesNodePool, error) {
+func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, timeout time.Duration, pool map[string]interface{}, clusterID, poolID string, customTags ...string) (*godo.KubernetesNodePool, error) {
	tags := expandTags(pool["tags"].(*schema.Set).List())
	tags = append(tags, customTags...)

@@ -262,35 +278,23 @@ func digitaloceanKubernetesNodePoolUpdate(client *godo.Client, pool map[string]interface{}, clusterID, poolID string, customTags ...string) (*godo.KubernetesNodePool, error) {
		return nil, fmt.Errorf("Unable to update nodepool: %s", err)
	}

-	err = waitForKubernetesNodePoolCreate(client, clusterID, p.ID)
+	err = waitForKubernetesNodePoolCreate(client, timeout, clusterID, p.ID)
	if err != nil {
		return nil, err
	}

	return p, nil
}

-func digitaloceanKubernetesNodePoolDelete(client *godo.Client, clusterID, poolID string) diag.Diagnostics {
-	// delete the old pool
-	_, err := client.Kubernetes.DeleteNodePool(context.Background(), clusterID, poolID)
-	if err != nil {
-		return diag.Errorf("Unable to delete node pool %s", err)
-	}
-
-	err = waitForKubernetesNodePoolDelete(client, clusterID, poolID)
-	if err != nil {
-		return diag.FromErr(err)
-	}
-
-	return nil
-}

-func waitForKubernetesNodePoolCreate(client *godo.Client, id string, poolID string) error {
-	tickerInterval := 10 //10s
-	timeout := 1800 //1800s, 30min
-	n := 0
+func waitForKubernetesNodePoolCreate(client *godo.Client, duration time.Duration, id string, poolID string) error {
+	var (
+		tickerInterval = 10 * time.Second
+		timeoutSeconds = duration.Seconds()
+		timeout        = int(timeoutSeconds / tickerInterval.Seconds())
+		n              = 0
+	)

-	ticker := time.NewTicker(time.Duration(tickerInterval) * time.Second)
+	ticker := time.NewTicker(tickerInterval)
	for range ticker.C {
		pool, _, err := client.Kubernetes.GetNodePool(context.Background(), id, poolID)
		if err != nil {
@@ -310,7 +314,7 @@ func waitForKubernetesNodePoolCreate(client *godo.Client, id string, poolID string) error {
			return nil
		}

-		if n*tickerInterval > timeout {
+		if n > timeout {
			ticker.Stop()
			break
		}
@@ -321,14 +325,17 @@ func waitForKubernetesNodePoolCreate(client *godo.Client, id string, poolID string) error {
return fmt.Errorf("Timeout waiting to create nodepool")
}

func waitForKubernetesNodePoolDelete(client *godo.Client, id string, poolID string) error {
tickerInterval := 10 //10s
timeout := 1800 //1800s, 30min
n := 0
func waitForKubernetesNodePoolDelete(client *godo.Client, d *schema.ResourceData) error {
var (
tickerInterval = 10 * time.Second
timeoutSeconds = d.Timeout(schema.TimeoutDelete).Seconds()
timeout = int(timeoutSeconds / tickerInterval.Seconds())
n = 0
ticker = time.NewTicker(tickerInterval)
)

ticker := time.NewTicker(time.Duration(tickerInterval) * time.Second)
for range ticker.C {
_, resp, err := client.Kubernetes.GetNodePool(context.Background(), id, poolID)
_, resp, err := client.Kubernetes.GetNodePool(context.Background(), d.Get("cluster_id").(string), d.Id())
if err != nil {
ticker.Stop()

@@ -339,7 +346,7 @@ func waitForKubernetesNodePoolDelete(client *godo.Client, id string, poolID string) error {
			return fmt.Errorf("Error trying to read nodepool state: %s", err)
		}

-		if n*tickerInterval > timeout {
+		if n > timeout {
			ticker.Stop()
			break
		}
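Since the node pool resource registers both create and delete defaults, both phases can now be tuned independently. A sketch under the same caveat as above (arguments illustrative, not taken from this PR):

resource "digitalocean_kubernetes_node_pool" "example" {
  cluster_id = digitalocean_kubernetes_cluster.example.id
  name       = "worker-pool"
  size       = "s-2vcpu-2gb"
  node_count = 3

  timeouts {
    create = "45m"
    delete = "45m"
  }
}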