diff --git a/go.mod b/go.mod index 9a5a79aa706..b2a6d0e3d85 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf // indirect github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.2.0 - github.com/hashicorp/terraform v0.12.20 // indirect + github.com/hashicorp/terraform v0.12.20 github.com/hashicorp/terraform-plugin-sdk v1.6.0 github.com/hashicorp/tf-sdk-migrator v1.0.0 // indirect github.com/hokaccha/go-prettyjson v0.0.0-20170213120834-e6b9231a2b1c // indirect diff --git a/go.sum b/go.sum index 8ecb5dd45e9..eeb1e2732af 100644 --- a/go.sum +++ b/go.sum @@ -272,6 +272,7 @@ github.com/hashicorp/terraform-plugin-sdk v1.0.0/go.mod h1:NuwtLpEpPsFaKJPJNGtMc github.com/hashicorp/terraform-plugin-sdk v1.5.0 h1:hzac/oigJkGup0kI+PwBGI4/fvG7Na8kM8j9xCBrmWo= github.com/hashicorp/terraform-plugin-sdk v1.6.0 h1:Um5hsAL7kKsfTHtan8lybY/d03F2bHu4fjRB1H6Ag4U= github.com/hashicorp/terraform-plugin-sdk v1.6.0/go.mod h1:H5QLx/uhwfxBZ59Bc5SqT19M4i+fYt7LZjHTpbLZiAg= +github.com/hashicorp/terraform-plugin-sdk v1.7.0 h1:B//oq0ZORG+EkVrIJy0uPGSonvmXqxSzXe8+GhknoW0= github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/tf-sdk-migrator v1.0.0/go.mod h1:QWLOMtAF2IMPDqKLN30L4ijNYVco4YF6HLul6VDPNfc= diff --git a/ibm/data_source_ibm_container_vpc_cluster.go b/ibm/data_source_ibm_container_vpc_cluster.go index 8f41b89d29d..bace8d66f6a 100644 --- a/ibm/data_source_ibm_container_vpc_cluster.go +++ b/ibm/data_source_ibm_container_vpc_cluster.go @@ -142,12 +142,24 @@ func dataSourceIBMContainerVPCCluster() *schema.Resource { }, }, }, + + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + 
"resource_group_id": { Type: schema.TypeString, Optional: true, Description: "ID of the resource group.", Computed: true, }, + "public_service_endpoint": { Type: schema.TypeBool, Computed: true, @@ -269,6 +281,8 @@ func dataSourceIBMContainerClusterVPCRead(d *schema.ResourceData, meta interface d.Set("private_service_endpoint_url", cls.ServiceEndpoints.PrivateServiceEndpointURL) d.Set("public_service_endpoint", cls.ServiceEndpoints.PublicServiceEndpointEnabled) d.Set("private_service_endpoint", cls.ServiceEndpoints.PrivateServiceEndpointEnabled) + d.Set("ingress_hostname", cls.Ingress.HostName) + d.Set("ingress_secret", cls.Ingress.SecretName) workerFields, err := csClient.Workers().ListWorkers(clusterID, false, targetEnv) if err != nil { diff --git a/ibm/resource_ibm_container_vpc_cluster.go b/ibm/resource_ibm_container_vpc_cluster.go index 6c83314619d..2cd9c8a35b3 100644 --- a/ibm/resource_ibm_container_vpc_cluster.go +++ b/ibm/resource_ibm_container_vpc_cluster.go @@ -14,13 +14,17 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) const ( - deployRequested = "Deploy requested" - deployInProgress = "Deploy in progress" - ready = "Ready" - normal = "normal" + deployRequested = "Deploy requested" + deployInProgress = "Deploy in progress" + ready = "Ready" + normal = "normal" + masterNodeReady = "MasterNodeReady" + oneWorkerNodeReady = "OneWorkerNodeReady" + ingressReady = "IngressReady" ) func resourceIBMContainerVpcCluster() *schema.Resource { @@ -138,6 +142,15 @@ func resourceIBMContainerVpcCluster() *schema.Resource { Set: resourceIBMVPCHash, }, + "wait_till": { + Type: schema.TypeString, + Optional: true, + Default: ingressReady, + DiffSuppressFunc: applyOnce, + ValidateFunc: validation.StringInSlice([]string{masterNodeReady, oneWorkerNodeReady, 
ingressReady}, true), + Description: "wait_till can be configured for Master Ready, One worker Ready or Ingress Ready", + }, + ResourceControllerURL: { Type: schema.TypeString, Computed: true, @@ -224,6 +237,16 @@ func resourceIBMContainerVpcCluster() *schema.Resource { Description: "CRN of resource instance", }, + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + ResourceName: { Type: schema.TypeString, Computed: true, @@ -275,6 +298,12 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface flavor := d.Get("flavor").(string) workerCount := d.Get("worker_count").(int) + // timeoutStage will define the timeout stage + var timeoutStage string + if v, ok := d.GetOk("wait_till"); ok { + timeoutStage = v.(string) + } + var zonesList = make([]v2.Zone, 0) if res, ok := d.GetOk("zones"); ok { @@ -318,11 +347,27 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface return err } d.SetId(cls.ID) - _, err = waitForVpcClusterCreate(d, meta) - if err != nil { - return err - } + switch strings.ToLower(timeoutStage) { + + case strings.ToLower(masterNodeReady): + _, err = waitForVpcClusterMasterAvailable(d, meta) + if err != nil { + return err + } + + case strings.ToLower(oneWorkerNodeReady): + _, err = waitForVpcClusterOneWorkerAvailable(d, meta) + if err != nil { + return err + } + case strings.ToLower(ingressReady): + _, err = waitForVpcClusterIngressAvailable(d, meta) + if err != nil { + return err + } + + } return resourceIBMContainerVpcClusterUpdate(d, meta) } @@ -355,7 +400,7 @@ func resourceIBMContainerVpcClusterUpdate(d *schema.ResourceData, meta interface } } - if d.HasChange("kube_version") { + if d.HasChange("kube_version") && !d.IsNewResource() { ClusterClient, err := meta.(ClientSession).ContainerAPI() if err != nil { return err @@ -399,7 +444,7 @@ func resourceIBMContainerVpcClusterUpdate(d 
*schema.ResourceData, meta interface } } - if d.HasChange("worker_count") { + if d.HasChange("worker_count") && !d.IsNewResource() { count := d.Get("worker_count").(int) ClusterClient, err := meta.(ClientSession).ContainerAPI() if err != nil { @@ -475,6 +520,8 @@ func resourceIBMContainerVpcClusterRead(d *schema.ResourceData, meta interface{} d.Set("pod_subnet", cls.PodSubnet) d.Set("state", cls.State) d.Set("region", cls.Region) + d.Set("ingress_hostname", cls.Ingress.HostName) + d.Set("ingress_secret", cls.Ingress.SecretName) d.Set("albs", flattenVpcAlbs(albs, "all")) d.Set("resource_group_id", cls.ResourceGroupID) d.Set("public_service_endpoint_url", cls.ServiceEndpoints.PublicServiceEndpointURL) @@ -565,7 +612,7 @@ func waitForVpcClusterDelete(d *schema.ResourceData, meta interface{}) (interfac return deleteStateConf.WaitForState() } -func waitForVpcClusterCreate(d *schema.ResourceData, meta interface{}) (interface{}, error) { +func waitForVpcClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { targetEnv, err := getVpcClusterTargetHeader(d, meta) if err != nil { return nil, err @@ -577,7 +624,7 @@ func waitForVpcClusterCreate(d *schema.ResourceData, meta interface{}) (interfac clusterID := d.Id() createStateConf := &resource.StateChangeConf{ Pending: []string{deployRequested, deployInProgress}, - Target: []string{ready}, + Target: []string{normal}, Refresh: func() (interface{}, string, error) { workers, err := csClient.Workers().ListByWorkerPool(clusterID, "default", false, targetEnv) if err != nil { @@ -592,7 +639,7 @@ func waitForVpcClusterCreate(d *schema.ResourceData, meta interface{}) (interfac log.Println("worker health state: ", worker.Health.State) if worker.Health.State == normal { - return workers, ready, nil + return workers, normal, nil } } return workers, deployInProgress, nil @@ -606,6 +653,74 @@ func waitForVpcClusterCreate(d *schema.ResourceData, meta interface{}) (interfac return 
createStateConf.WaitForState() } +func waitForVpcClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return nil, err + } + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return nil, err + } + clusterID := d.Id() + createStateConf := &resource.StateChangeConf{ + Pending: []string{deployRequested, deployInProgress}, + Target: []string{ready}, + Refresh: func() (interface{}, string, error) { + clusterInfo, clusterInfoErr := csClient.Clusters().GetCluster(clusterID, targetEnv) + + if err != nil || clusterInfoErr != nil { + return clusterInfo, deployInProgress, err + } + + if clusterInfo.Lifecycle.MasterStatus == ready { + return clusterInfo, ready, nil + } + return clusterInfo, deployInProgress, nil + + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + ContinuousTargetOccurence: 5, + } + return createStateConf.WaitForState() +} + +func waitForVpcClusterIngressAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return nil, err + } + csClient, err := meta.(ClientSession).VpcContainerAPI() + if err != nil { + return nil, err + } + clusterID := d.Id() + createStateConf := &resource.StateChangeConf{ + Pending: []string{deployRequested, deployInProgress}, + Target: []string{ready}, + Refresh: func() (interface{}, string, error) { + clusterInfo, clusterInfoErr := csClient.Clusters().GetCluster(clusterID, targetEnv) + + if err != nil || clusterInfoErr != nil { + return clusterInfo, deployInProgress, err + } + + if clusterInfo.Ingress.HostName != "" { + return clusterInfo, ready, nil + } + return clusterInfo, deployInProgress, nil + + }, + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + ContinuousTargetOccurence: 5, + } + return 
createStateConf.WaitForState() +} + func getVpcClusterTargetHeader(d *schema.ResourceData, meta interface{}) (v2.ClusterTargetHeader, error) { resourceGroup := d.Get("resource_group_id").(string) diff --git a/website/docs/d/container_vpc_cluster.html.markdown b/website/docs/d/container_vpc_cluster.html.markdown index 6b99753ac96..4b9891e96ad 100644 --- a/website/docs/d/container_vpc_cluster.html.markdown +++ b/website/docs/d/container_vpc_cluster.html.markdown @@ -55,6 +55,8 @@ The following attributes are exported: * `load_balancer_hostname` - Host name of Load Balancer * `resize` - Resize of ALB * `disable_deployment` - Disable the ALB Deployment +* `ingress_hostname` - The Ingress hostname. +* `ingress_secret` - The Ingress secret. * `public_service_endpoint` - Is public service endpoint enabled to make the master publicly accessible. * `private_service_endpoint` - Is private service endpoint enabled to make the master privately accessible. * `public_service_endpoint_url` - Url of the public_service_endpoint diff --git a/website/docs/r/container_vpc_cluster.html.markdown b/website/docs/r/container_vpc_cluster.html.markdown index 86fb6565992..c79f1bd8dcd 100644 --- a/website/docs/r/container_vpc_cluster.html.markdown +++ b/website/docs/r/container_vpc_cluster.html.markdown @@ -45,8 +45,18 @@ The following arguments are supported: * `service_subnet` - (Optional, Forces new resource,String) Specify a custom subnet CIDR to provide private IP addresses for services. The subnet must be at least '/24' or larger. For more info, refer [here](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#service-subnet) Default value: '172.21.0.0/16'. * `worker_count` - (Optional, Int) The number of worker nodes per zone in the default worker pool. Default value '1'. * `resource_group_id` - (Optional, Forces new resource, string) The ID of the resource group. You can retrieve the value from data source `ibm_resource_group`. 
If not provided defaults to default resource group. -* `tags` - (Optional, array of strings) Tags associated with the container cluster instance. - **NOTE**: For users on account to add tags to a resource, they must be assigned the appropriate access. Learn more about tags permission [here](https://cloud.ibm.com/docs/resources?topic=resources-access) +* `tags` - (Optional, array of strings) Tags associated with the container cluster instance. +* `wait_till` - (Optional, String) The cluster creation happens in multiple stages. To avoid the longer wait times for resource execution, this field is introduced. +Resource will wait for only the specified stage and complete execution. The supported stages are + - *MasterNodeReady*: resource will wait till the master node is ready + - *OneWorkerNodeReady*: resource will wait till at least one worker node reaches the ready state + - *IngressReady*: resource will wait till the ingress-host and ingress-secret are available. + + Default value: IngressReady + +**NOTE**: +1. For users on account to add tags to a resource, they must be assigned the appropriate access. Learn more about tags permission [here](https://cloud.ibm.com/docs/resources?topic=resources-access) +2. `wait_till` is set only for the first time creation of the resource, modifications in further runs will not have any impact. ## Attribute Reference @@ -55,6 +65,8 @@ The following attributes are exported: * `id` - Id of the cluster * `crn` - CRN of the cluster. +* `ingress_hostname` - The Ingress hostname. +* `ingress_secret` - The Ingress secret. * `master_status` - Status of kubernetes master. * `master_url` - The Master server URL. * `private_service_endpoint_url` - Private service endpoint url.