From b4767c19917e364f3c0817ebbb5ae7f6fc75dff7 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 11:08:58 -0700 Subject: [PATCH 01/19] Start of edge node addition --- azurerm/helpers/azure/hdinsight.go | 12 +++ .../resource_arm_hdinsight_hadoop_cluster.go | 43 ++++++++ ...ource_arm_hdinsight_hadoop_cluster_test.go | 97 +++++++++++++++++++ .../resource_arm_hdinsight_hbase_cluster.go | 3 + ...arm_hdinsight_interactive_query_cluster.go | 3 + .../resource_arm_hdinsight_kafka_cluster.go | 3 + ...ource_arm_hdinsight_ml_services_cluster.go | 4 + .../resource_arm_hdinsight_rserver_cluster.go | 4 + .../resource_arm_hdinsight_spark_cluster.go | 3 + .../resource_arm_hdinsight_storm_cluster.go | 3 + 10 files changed, 175 insertions(+) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 264f4efc168e..cf8e088df8e6 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -210,6 +210,7 @@ type HDInsightNodeDefinition struct { MaxNumberOfDisksPerNode *int FixedMinInstanceCount *int32 FixedTargetInstanceCount *int32 + Required bool } func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition) *schema.Schema { @@ -367,6 +368,17 @@ func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNo } } + if !definition.Required { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: result, + }, + } + } + return &schema.Schema{ Type: schema.TypeList, Required: true, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 89a80cc435aa..53c331c05d2b 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -20,6 +20,7 @@ var hdInsightHadoopClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightHadoopClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,6 +28,7 @@ var hdInsightHadoopClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 25, CanSpecifyDisks: false, + Required: true, } var hdInsightHadoopClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -36,6 +38,17 @@ var hdInsightHadoopClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinitio CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, +} + +var hdInsightHadoopClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ + CanSpecifyInstanceCount: false, + MinInstanceCount: 1, + MaxInstanceCount: 1, + CanSpecifyDisks: false, + FixedMinInstanceCount: utils.Int32(int32(1)), + FixedTargetInstanceCount: utils.Int32(int32(1)), + Required: false, } func resourceArmHDInsightHadoopCluster() *schema.Resource { @@ -89,6 +102,8 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { "worker_node": azure.SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHadoopClusterWorkerNodeDefinition), "zookeeper_node": azure.SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHadoopClusterZookeeperNodeDefinition), + + "edge_node": azure.SchemaHDInsightNodeDefinition("roles.0.edge_node", hdInsightHadoopClusterEdgeNodeDefinition), }, }, }, @@ -195,6 +210,34 @@ func resourceArmHDInsightHadoopClusterCreate(d 
*schema.ResourceData, meta interf d.SetId(*read.ID) + // Add edge node after creation + if v, ok := d.GetOk("roles.0.edge_node"); ok { + edgeNodeRaw := v.([]interface{}) + applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient + + edgeNode, err := azure.ExpandHDInsightNodeDefinition("edgenode", edgeNodeRaw, hdInsightHadoopClusterEdgeNodeDefinition) + if err != nil { + return fmt.Errorf("Error expanding `roles.0.edge_node`: %+v", err) + } + roles := make([]hdinsight.Role, 0) + roles = append(roles, *edgeNode) + application := hdinsight.Application{ + Properties: &hdinsight.ApplicationProperties{ + ComputeProfile: &hdinsight.ComputeProfile{ + Roles: &roles, + }, + }, + } + future, err := applicationsClient.Create(ctx, resourceGroup, name, name, application) + if err != nil { + return fmt.Errorf("Error creating edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for creation of edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + return resourceArmHDInsightHadoopClusterRead(d, meta) } diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 240ba0be140b..93d9d1ac8289 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -249,6 +249,45 @@ func TestAccAzureRMHDInsightHadoopCluster_complete(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(t *testing.T) { + resourceName := "azurerm_hdinsight_hadoop_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "roles.0.edge_node.0.password", + "roles.0.edge_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -580,6 +619,64 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt, rInt, rInt) } +func testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_hadoop_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = 
"${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Premium" + + component_version { + hadoop = "2.7" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = "${azurerm_storage_container.test.id}" + storage_account_key = "${azurerm_storage_account.test.primary_access_key}" + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + edge_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } +} +`, template, rInt) +} + func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster.go b/azurerm/resource_arm_hdinsight_hbase_cluster.go index 469d24b37126..5376cdbc9426 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster.go @@ -19,6 +19,7 @@ var hdInsightHBaseClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightHBaseClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -26,6 +27,7 @@ var hdInsightHBaseClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 23, CanSpecifyDisks: false, + Required: true, } var hdInsightHBaseClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -34,6 +36,7 @@ var hdInsightHBaseClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } func resourceArmHDInsightHBaseCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go index 7ab6b23f6272..9e9876c2dc1e 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go @@ -19,6 +19,7 @@ var hdInsightInteractiveQueryClusterHeadNodeDefinition = azure.HDInsightNodeDefi MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightInteractiveQueryClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -26,6 +27,7 @@ var hdInsightInteractiveQueryClusterWorkerNodeDefinition = azure.HDInsightNodeDe MinInstanceCount: 1, MaxInstanceCount: 9, CanSpecifyDisks: false, + Required: true, } var hdInsightInteractiveQueryClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -34,6 +36,7 @@ var hdInsightInteractiveQueryClusterZookeeperNodeDefinition = azure.HDInsightNod MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster.go b/azurerm/resource_arm_hdinsight_kafka_cluster.go index aad20f2dfeb9..2690556b0177 100644 --- 
a/azurerm/resource_arm_hdinsight_kafka_cluster.go +++ b/azurerm/resource_arm_hdinsight_kafka_cluster.go @@ -19,6 +19,7 @@ var hdInsightKafkaClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightKafkaClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,6 +28,7 @@ var hdInsightKafkaClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 57, CanSpecifyDisks: true, MaxNumberOfDisksPerNode: utils.Int(8), + Required: true, } var hdInsightKafkaClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -35,6 +37,7 @@ var hdInsightKafkaClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } func resourceArmHDInsightKafkaCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index ff15e3582565..179e55186555 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -21,6 +21,7 @@ var hdInsightMLServicesClusterHeadNodeDefinition = azure.HDInsightNodeDefinition CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightMLServicesClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -28,6 +29,7 @@ var hdInsightMLServicesClusterWorkerNodeDefinition = azure.HDInsightNodeDefiniti MinInstanceCount: 1, MaxInstanceCount: 16, CanSpecifyDisks: false, + Required: true, } var hdInsightMLServicesClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -37,6 +39,7 @@ var hdInsightMLServicesClusterZookeeperNodeDefinition = azure.HDInsightNodeDefin CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } var hdInsightMLServicesClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ @@ -45,6 +48,7 @@ var hdInsightMLServicesClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 1, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(1)), + Required: true, } func resourceArmHDInsightMLServicesCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index 937d94af3bff..46dd798c4bcf 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -21,6 +21,7 @@ var hdInsightRServerClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightRServerClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -28,6 +29,7 @@ var hdInsightRServerClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 16, CanSpecifyDisks: false, + Required: true, } var hdInsightRServerClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -37,6 +39,7 @@ var hdInsightRServerClusterZookeeperNodeDefinition = azure.HDInsightNodeDefiniti CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } var 
hdInsightRServerClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ @@ -45,6 +48,7 @@ var hdInsightRServerClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 1, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(1)), + Required: true, } func resourceArmHDInsightRServerCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_spark_cluster.go b/azurerm/resource_arm_hdinsight_spark_cluster.go index 71f6a997b68f..4f152b333893 100644 --- a/azurerm/resource_arm_hdinsight_spark_cluster.go +++ b/azurerm/resource_arm_hdinsight_spark_cluster.go @@ -19,6 +19,7 @@ var hdInsightSparkClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightSparkClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -26,6 +27,7 @@ var hdInsightSparkClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 19, CanSpecifyDisks: false, + Required: true, } var hdInsightSparkClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -34,6 +36,7 @@ var hdInsightSparkClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, FixedTargetInstanceCount: utils.Int32(int32(3)), CanSpecifyDisks: false, + Required: true, } func resourceArmHDInsightSparkCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go index 0573ee82eb29..b97dbd550191 100644 --- a/azurerm/resource_arm_hdinsight_storm_cluster.go +++ b/azurerm/resource_arm_hdinsight_storm_cluster.go @@ -19,6 +19,7 @@ var hdInsightStormClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 4, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), + Required: true, } var hdInsightStormClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,6 +28,7 @@ var hdInsightStormClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ // can't find a hard limit - appears to be limited by the subscription; setting something sensible for now MaxInstanceCount: 9999, CanSpecifyDisks: false, + Required: true, } var hdInsightStormClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -35,6 +37,7 @@ var hdInsightStormClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), + Required: true, } func resourceArmHDInsightStormCluster() *schema.Resource { From f59db70a3e9eb6f49cdab63dc6254d3914f9d7b6 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 11:30:04 -0700 Subject: [PATCH 02/19] Add application type --- azurerm/resource_arm_hdinsight_hadoop_cluster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 53c331c05d2b..16bb7427e333 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -226,6 +226,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf ComputeProfile: &hdinsight.ComputeProfile{ Roles: &roles, }, + ApplicationType: utils.String("CustomApplication"), }, } future, err := applicationsClient.Create(ctx, resourceGroup, name, name, application) From 5a56e15a0fda9fa691c5dad770133805f9053879 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 
13:53:34 -0700 Subject: [PATCH 03/19] Remove abstraction --- azurerm/helpers/azure/hdinsight.go | 184 +++++++++--------- .../resource_arm_hdinsight_hadoop_cluster.go | 25 ++- 2 files changed, 111 insertions(+), 98 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index cf8e088df8e6..6a15e3e4e8dc 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -213,98 +213,102 @@ type HDInsightNodeDefinition struct { Required bool } +func SchemaHDInsightNodeDefinitionVMSize() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + // short of deploying every VM Sku for every node type for every HDInsight Cluster + // this is the list I've (@tombuildsstuff) found for valid SKU's from an endpoint in the Portal + // using another SKU causes a bad request from the API - as such this is a best effort UX + "ExtraSmall", + "Small", + "Medium", + "Large", + "ExtraLarge", + "A5", + "A6", + "A7", + "A8", + "A9", + "A10", + "A11", + "Standard_A1_V2", + "Standard_A2_V2", + "Standard_A2m_V2", + "Standard_A3", + "Standard_A4_V2", + "Standard_A4m_V2", + "Standard_A8_V2", + "Standard_A8m_V2", + "Standard_D1", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D1_V2", + "Standard_D2_V2", + "Standard_D3_V2", + "Standard_D4_V2", + "Standard_D5_V2", + "Standard_D11_V2", + "Standard_D12_V2", + "Standard_D13_V2", + "Standard_D14_V2", + "Standard_DS1_V2", + "Standard_DS2_V2", + "Standard_DS3_V2", + "Standard_DS4_V2", + "Standard_DS5_V2", + "Standard_DS11_V2", + "Standard_DS12_V2", + "Standard_DS13_V2", + "Standard_DS14_V2", + "Standard_E2_V3", + "Standard_E4_V3", + "Standard_E8_V3", + "Standard_E16_V3", + "Standard_E20_V3", + "Standard_E32_V3", + "Standard_E64_V3", + "Standard_E64i_V3", + "Standard_E2s_V3", + "Standard_E4s_V3", + "Standard_E8s_V3", + "Standard_E16s_V3", + "Standard_E20s_V3", + "Standard_E32s_V3", + "Standard_E64s_V3", + "Standard_E64is_V3", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_F2s_V2", + "Standard_F4s_V2", + "Standard_F8s_V2", + "Standard_F16s_V2", + "Standard_F32s_V2", + "Standard_F64s_V2", + "Standard_F72s_V2", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5", + "Standard_NC24", + }, true), + } +} + func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition) *schema.Schema { result := map[string]*schema.Schema{ - "vm_size": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - // short of deploying every VM Sku for every node type for every HDInsight Cluster - // this is the list I've (@tombuildsstuff) found for valid SKU's from an endpoint in the Portal - // using another SKU causes a bad request from the API - as such this is a best effort UX - "ExtraSmall", - "Small", - "Medium", - "Large", - "ExtraLarge", - "A5", - "A6", - "A7", - "A8", - "A9", - "A10", - "A11", - "Standard_A1_V2", - "Standard_A2_V2", - "Standard_A2m_V2", - "Standard_A3", - "Standard_A4_V2", - "Standard_A4m_V2", - "Standard_A8_V2", - "Standard_A8m_V2", - "Standard_D1", - "Standard_D2", - "Standard_D3", - "Standard_D4", - "Standard_D11", - "Standard_D12", - "Standard_D13", - "Standard_D14", - "Standard_D1_V2", - "Standard_D2_V2", - "Standard_D3_V2", - "Standard_D4_V2", - 
"Standard_D5_V2", - "Standard_D11_V2", - "Standard_D12_V2", - "Standard_D13_V2", - "Standard_D14_V2", - "Standard_DS1_V2", - "Standard_DS2_V2", - "Standard_DS3_V2", - "Standard_DS4_V2", - "Standard_DS5_V2", - "Standard_DS11_V2", - "Standard_DS12_V2", - "Standard_DS13_V2", - "Standard_DS14_V2", - "Standard_E2_V3", - "Standard_E4_V3", - "Standard_E8_V3", - "Standard_E16_V3", - "Standard_E20_V3", - "Standard_E32_V3", - "Standard_E64_V3", - "Standard_E64i_V3", - "Standard_E2s_V3", - "Standard_E4s_V3", - "Standard_E8s_V3", - "Standard_E16s_V3", - "Standard_E20s_V3", - "Standard_E32s_V3", - "Standard_E64s_V3", - "Standard_E64is_V3", - "Standard_G1", - "Standard_G2", - "Standard_G3", - "Standard_G4", - "Standard_G5", - "Standard_F2s_V2", - "Standard_F4s_V2", - "Standard_F8s_V2", - "Standard_F16s_V2", - "Standard_F32s_V2", - "Standard_F64s_V2", - "Standard_F72s_V2", - "Standard_GS1", - "Standard_GS2", - "Standard_GS3", - "Standard_GS4", - "Standard_GS5", - "Standard_NC24", - }, true), - }, + "vm_size": SchemaHDInsightNodeDefinitionVMSize(), "username": { Type: schema.TypeString, Required: true, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 16bb7427e333..cea69993ba26 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -103,7 +103,16 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { "zookeeper_node": azure.SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHadoopClusterZookeeperNodeDefinition), - "edge_node": azure.SchemaHDInsightNodeDefinition("roles.0.edge_node", hdInsightHadoopClusterEdgeNodeDefinition), + "edge_node": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm_size": azure.SchemaHDInsightNodeDefinitionVMSize(), + }, + }, + }, }, }, }, @@ -215,16 +224,16 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf edgeNodeRaw := v.([]interface{}) applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient - edgeNode, err := azure.ExpandHDInsightNodeDefinition("edgenode", edgeNodeRaw, hdInsightHadoopClusterEdgeNodeDefinition) - if err != nil { - return fmt.Errorf("Error expanding `roles.0.edge_node`: %+v", err) - } - roles := make([]hdinsight.Role, 0) - roles = append(roles, *edgeNode) + v := edgeNodeRaw[0].(map[string]interface{}) application := hdinsight.Application{ Properties: &hdinsight.ApplicationProperties{ ComputeProfile: &hdinsight.ComputeProfile{ - Roles: &roles, + Roles: &[]hdinsight.Role{hdinsight.Role{ + Name: utils.String("edgenode"), + HardwareProfile: &hdinsight.HardwareProfile{ + VMSize: utils.String(v["vm_size"].(string)), + }, + }}, }, ApplicationType: utils.String("CustomApplication"), }, From cc272cf461a7d31951532b56950ba35a5da428aa Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 13:53:54 -0700 Subject: [PATCH 04/19] Fmt --- azurerm/resource_arm_hdinsight_hadoop_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index cea69993ba26..ff8fb1754174 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -228,7 +228,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf application := hdinsight.Application{ Properties: &hdinsight.ApplicationProperties{ 
ComputeProfile: &hdinsight.ComputeProfile{ - Roles: &[]hdinsight.Role{hdinsight.Role{ + Roles: &[]hdinsight.Role{{ Name: utils.String("edgenode"), HardwareProfile: &hdinsight.HardwareProfile{ VMSize: utils.String(v["vm_size"].(string)), From fe4cb79cd4bfaa7d944a314bbb1b2a8a254c7dc3 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 13:58:40 -0700 Subject: [PATCH 05/19] Fix test --- azurerm/resource_arm_hdinsight_hadoop_cluster_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 93d9d1ac8289..c6f74608be17 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -669,8 +669,6 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { edge_node { vm_size = "Standard_D3_V2" - username = "acctestusrvm" - password = "AccTestvdSC4daf986!" } } } From b98a7bd6266f9357c11069f4709e82f85504f118 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 14:20:32 -0700 Subject: [PATCH 06/19] Add target instance count --- azurerm/helpers/azure/hdinsight.go | 12 ------------ azurerm/resource_arm_hdinsight_hadoop_cluster.go | 14 +------------- azurerm/resource_arm_hdinsight_hbase_cluster.go | 3 --- ...urce_arm_hdinsight_interactive_query_cluster.go | 3 --- azurerm/resource_arm_hdinsight_kafka_cluster.go | 3 --- .../resource_arm_hdinsight_ml_services_cluster.go | 4 ---- azurerm/resource_arm_hdinsight_rserver_cluster.go | 4 ---- azurerm/resource_arm_hdinsight_spark_cluster.go | 3 --- azurerm/resource_arm_hdinsight_storm_cluster.go | 3 --- 9 files changed, 1 insertion(+), 48 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 6a15e3e4e8dc..326019443b07 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -210,7 +210,6 @@ type HDInsightNodeDefinition struct { MaxNumberOfDisksPerNode *int FixedMinInstanceCount *int32 FixedTargetInstanceCount *int32 - Required bool } func SchemaHDInsightNodeDefinitionVMSize() *schema.Schema { @@ -372,17 +371,6 @@ func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNo } } - if !definition.Required { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: result, - }, - } - } - return &schema.Schema{ Type: schema.TypeList, Required: true, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index ff8fb1754174..bd2e3093a1af 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -20,7 +20,6 @@ var hdInsightHadoopClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightHadoopClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -28,7 +27,6 @@ var hdInsightHadoopClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 25, CanSpecifyDisks: false, - Required: true, } var hdInsightHadoopClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -38,17 +36,6 @@ var hdInsightHadoopClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinitio CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), - 
Required: true, -} - -var hdInsightHadoopClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ - CanSpecifyInstanceCount: false, - MinInstanceCount: 1, - MaxInstanceCount: 1, - CanSpecifyDisks: false, - FixedMinInstanceCount: utils.Int32(int32(1)), - FixedTargetInstanceCount: utils.Int32(int32(1)), - Required: false, } func resourceArmHDInsightHadoopCluster() *schema.Resource { @@ -233,6 +220,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf HardwareProfile: &hdinsight.HardwareProfile{ VMSize: utils.String(v["vm_size"].(string)), }, + TargetInstanceCount: utils.Int32(1), }}, }, ApplicationType: utils.String("CustomApplication"), diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster.go b/azurerm/resource_arm_hdinsight_hbase_cluster.go index 5376cdbc9426..469d24b37126 100644 --- a/azurerm/resource_arm_hdinsight_hbase_cluster.go +++ b/azurerm/resource_arm_hdinsight_hbase_cluster.go @@ -19,7 +19,6 @@ var hdInsightHBaseClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightHBaseClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,7 +26,6 @@ var hdInsightHBaseClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 23, CanSpecifyDisks: false, - Required: true, } var hdInsightHBaseClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -36,7 +34,6 @@ var hdInsightHBaseClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } func resourceArmHDInsightHBaseCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go index 9e9876c2dc1e..7ab6b23f6272 100644 --- a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go +++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go @@ -19,7 +19,6 @@ var hdInsightInteractiveQueryClusterHeadNodeDefinition = azure.HDInsightNodeDefi MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightInteractiveQueryClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,7 +26,6 @@ var hdInsightInteractiveQueryClusterWorkerNodeDefinition = azure.HDInsightNodeDe MinInstanceCount: 1, MaxInstanceCount: 9, CanSpecifyDisks: false, - Required: true, } var hdInsightInteractiveQueryClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -36,7 +34,6 @@ var hdInsightInteractiveQueryClusterZookeeperNodeDefinition = azure.HDInsightNod MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster.go b/azurerm/resource_arm_hdinsight_kafka_cluster.go index 2690556b0177..aad20f2dfeb9 100644 --- a/azurerm/resource_arm_hdinsight_kafka_cluster.go +++ b/azurerm/resource_arm_hdinsight_kafka_cluster.go @@ -19,7 +19,6 @@ var hdInsightKafkaClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightKafkaClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -28,7 +27,6 @@ var hdInsightKafkaClusterWorkerNodeDefinition = 
azure.HDInsightNodeDefinition{ MaxInstanceCount: 57, CanSpecifyDisks: true, MaxNumberOfDisksPerNode: utils.Int(8), - Required: true, } var hdInsightKafkaClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -37,7 +35,6 @@ var hdInsightKafkaClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } func resourceArmHDInsightKafkaCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go index 179e55186555..ff15e3582565 100644 --- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go +++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go @@ -21,7 +21,6 @@ var hdInsightMLServicesClusterHeadNodeDefinition = azure.HDInsightNodeDefinition CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightMLServicesClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -29,7 +28,6 @@ var hdInsightMLServicesClusterWorkerNodeDefinition = azure.HDInsightNodeDefiniti MinInstanceCount: 1, MaxInstanceCount: 16, CanSpecifyDisks: false, - Required: true, } var hdInsightMLServicesClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -39,7 +37,6 @@ var hdInsightMLServicesClusterZookeeperNodeDefinition = azure.HDInsightNodeDefin CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } var hdInsightMLServicesClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ @@ -48,7 +45,6 @@ var hdInsightMLServicesClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 1, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(1)), - Required: true, } func resourceArmHDInsightMLServicesCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go index 46dd798c4bcf..937d94af3bff 100644 --- a/azurerm/resource_arm_hdinsight_rserver_cluster.go +++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go @@ -21,7 +21,6 @@ var hdInsightRServerClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightRServerClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -29,7 +28,6 @@ var hdInsightRServerClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 16, CanSpecifyDisks: false, - Required: true, } var hdInsightRServerClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -39,7 +37,6 @@ var hdInsightRServerClusterZookeeperNodeDefinition = azure.HDInsightNodeDefiniti CanSpecifyDisks: false, FixedMinInstanceCount: utils.Int32(int32(1)), FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } var hdInsightRServerClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ @@ -48,7 +45,6 @@ var hdInsightRServerClusterEdgeNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 1, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(1)), - Required: true, } func resourceArmHDInsightRServerCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_spark_cluster.go b/azurerm/resource_arm_hdinsight_spark_cluster.go index 4f152b333893..71f6a997b68f 
100644 --- a/azurerm/resource_arm_hdinsight_spark_cluster.go +++ b/azurerm/resource_arm_hdinsight_spark_cluster.go @@ -19,7 +19,6 @@ var hdInsightSparkClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 2, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightSparkClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -27,7 +26,6 @@ var hdInsightSparkClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ MinInstanceCount: 1, MaxInstanceCount: 19, CanSpecifyDisks: false, - Required: true, } var hdInsightSparkClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -36,7 +34,6 @@ var hdInsightSparkClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, FixedTargetInstanceCount: utils.Int32(int32(3)), CanSpecifyDisks: false, - Required: true, } func resourceArmHDInsightSparkCluster() *schema.Resource { diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go index b97dbd550191..0573ee82eb29 100644 --- a/azurerm/resource_arm_hdinsight_storm_cluster.go +++ b/azurerm/resource_arm_hdinsight_storm_cluster.go @@ -19,7 +19,6 @@ var hdInsightStormClusterHeadNodeDefinition = azure.HDInsightNodeDefinition{ MaxInstanceCount: 4, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(2)), - Required: true, } var hdInsightStormClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ @@ -28,7 +27,6 @@ var hdInsightStormClusterWorkerNodeDefinition = azure.HDInsightNodeDefinition{ // can't find a hard limit - appears to be limited by the subscription; setting something sensible for now MaxInstanceCount: 9999, CanSpecifyDisks: false, - Required: true, } var hdInsightStormClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition{ @@ -37,7 +35,6 @@ var hdInsightStormClusterZookeeperNodeDefinition = azure.HDInsightNodeDefinition MaxInstanceCount: 3, CanSpecifyDisks: false, FixedTargetInstanceCount: utils.Int32(int32(3)), - Required: true, } func resourceArmHDInsightStormCluster() *schema.Resource { From 93685665fc94bd3c212d72cc79e93dc8b1272e65 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 9 Aug 2019 15:00:10 -0700 Subject: [PATCH 07/19] Add install script --- azurerm/resource_arm_hdinsight_hadoop_cluster.go | 12 ++++++++++++ .../resource_arm_hdinsight_hadoop_cluster_test.go | 1 + azurerm/testdata/hadoop_cluster_empty_node.sh | 2 ++ 3 files changed, 15 insertions(+) create mode 100644 azurerm/testdata/hadoop_cluster_empty_node.sh diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index bd2e3093a1af..b25c32f3d800 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -2,6 +2,7 @@ package azurerm import ( "fmt" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "log" "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" @@ -97,6 +98,13 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "vm_size": azure.SchemaHDInsightNodeDefinitionVMSize(), + + "install_script_action_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.NoEmptyStrings, + }, }, }, }, @@ -221,6 +229,10 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf VMSize: 
utils.String(v["vm_size"].(string)), }, TargetInstanceCount: utils.Int32(1), + ScriptActions: &[]hdinsight.ScriptAction{{ + Name: utils.String("edgenode"), + URI: utils.String(v["install_script_action_uri"].(string)), + }}, }}, }, ApplicationType: utils.String("CustomApplication"), diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index c6f74608be17..c35a418808e8 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -669,6 +669,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { edge_node { vm_size = "Standard_D3_V2" + install_script_action_uri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh" } } } diff --git a/azurerm/testdata/hadoop_cluster_empty_node.sh b/azurerm/testdata/hadoop_cluster_empty_node.sh new file mode 100644 index 000000000000..3215f0b3f34f --- /dev/null +++ b/azurerm/testdata/hadoop_cluster_empty_node.sh @@ -0,0 +1,2 @@ +#! /bin/bash +echo "Empty node setup" \ No newline at end of file From 1b94e3acd4fd76060e79aa6fc47bc50dd6897b45 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 12 Aug 2019 18:06:03 -0700 Subject: [PATCH 08/19] Install scripts --- .../resource_arm_hdinsight_hadoop_cluster.go | 97 +++++++++++++++---- ...ource_arm_hdinsight_hadoop_cluster_test.go | 5 +- 2 files changed, 81 insertions(+), 21 deletions(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index b25c32f3d800..256ffdc45e1d 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -1,15 +1,15 @@ package azurerm import ( - "fmt" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" - "log" - - "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" - "github.com/hashicorp/terraform/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +"fmt" +"github.com/hashicorp/terraform/helper/validation" +"log" + +"github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" +"github.com/hashicorp/terraform/helper/schema" +"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" +"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // NOTE: this isn't a recommended way of building resources in Terraform @@ -99,11 +99,41 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "vm_size": azure.SchemaHDInsightNodeDefinitionVMSize(), - "install_script_action_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.NoEmptyStrings, + "install_script_action": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + // TODO test multiple roles + "roles": { + Type: schema.TypeSet, + Optional: true, 
+ ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "edgenode", + "headnode", + "workernode", + "zookeepernode", + }, false), + }, + Set: schema.HashString, + }, + }, + }, }, }, }, @@ -219,22 +249,21 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf edgeNodeRaw := v.([]interface{}) applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient - v := edgeNodeRaw[0].(map[string]interface{}) + edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) + installScriptActions := expandHDInsightApplicationScriptActions(edgeNodeConfig["install_script_actions"].([]interface{})) + application := hdinsight.Application{ Properties: &hdinsight.ApplicationProperties{ ComputeProfile: &hdinsight.ComputeProfile{ Roles: &[]hdinsight.Role{{ Name: utils.String("edgenode"), HardwareProfile: &hdinsight.HardwareProfile{ - VMSize: utils.String(v["vm_size"].(string)), + VMSize: utils.String(edgeNodeConfig["vm_size"].(string)), }, TargetInstanceCount: utils.Int32(1), - ScriptActions: &[]hdinsight.ScriptAction{{ - Name: utils.String("edgenode"), - URI: utils.String(v["install_script_action_uri"].(string)), - }}, }}, }, + InstallScriptActions: installScriptActions, ApplicationType: utils.String("CustomApplication"), }, } @@ -342,3 +371,31 @@ func flattenHDInsightHadoopComponentVersion(input map[string]*string) []interfac }, } } + +func expandHDInsightApplicationScriptActions(input []interface{}) *[]hdinsight.RuntimeScriptAction { + actions := make([]hdinsight.RuntimeScriptAction, 0) + + for _, v := range input { + val := v.(map[string]interface{}) + + name := val["name"].(string) + uri := val["uri"].(string) + + rolesRaw := val["roles"].(*schema.Set).List() + roles := make([]string, 0) + for _, v := range rolesRaw { + role := v.(string) + roles = append(roles, role) + } + + action := hdinsight.RuntimeScriptAction{ + Name: utils.String(name), + URI: utils.String(uri), + Roles: &roles, + } + + actions = append(actions, action) + } + + return &actions +} diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index c35a418808e8..9a2d3f0f2892 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -669,7 +669,10 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { edge_node { vm_size = "Standard_D3_V2" - install_script_action_uri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh" + install_script_action { + name = "script_action_1" + uri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh" + } } } } From 8baacbf7f7f684c6485e4036e7ed0a79bcd254da Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 12 Aug 2019 18:06:35 -0700 Subject: [PATCH 09/19] Fmt --- .../resource_arm_hdinsight_hadoop_cluster.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 256ffdc45e1d..34a6d7c4a739 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -1,15 +1,15 @@ package azurerm import ( -"fmt" -"github.com/hashicorp/terraform/helper/validation" -"log" - 
-"github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" -"github.com/hashicorp/terraform/helper/schema" -"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" -"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" -"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + "fmt" + "github.com/hashicorp/terraform/helper/validation" + "log" + + "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) // NOTE: this isn't a recommended way of building resources in Terraform @@ -264,7 +264,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf }}, }, InstallScriptActions: installScriptActions, - ApplicationType: utils.String("CustomApplication"), + ApplicationType: utils.String("CustomApplication"), }, } future, err := applicationsClient.Create(ctx, resourceGroup, name, name, application) From 5cff846a6614aa44031f13afc8da196a3769b555 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 12 Aug 2019 18:42:35 -0700 Subject: [PATCH 10/19] s -> --- azurerm/resource_arm_hdinsight_hadoop_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 34a6d7c4a739..3e81cb068bcf 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -250,7 +250,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) - installScriptActions := expandHDInsightApplicationScriptActions(edgeNodeConfig["install_script_actions"].([]interface{})) + installScriptActions := expandHDInsightApplicationScriptActions(edgeNodeConfig["install_script_action"].([]interface{})) application := hdinsight.Application{ Properties: &hdinsight.ApplicationProperties{ From 5adb47b15244ae32a5d108ea36893a8d2cef1b96 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Mon, 12 Aug 2019 19:12:59 -0700 Subject: [PATCH 11/19] Test help --- azurerm/resource_arm_hdinsight_hadoop_cluster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 3e81cb068bcf..51661b322ba3 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -267,6 +267,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf ApplicationType: utils.String("CustomApplication"), }, } + return fmt.Errorf("Application: %+v\nInstallScriptAddress: %+v\nInstallScript: %+v", application, application.Properties.InstallScriptActions, *application.Properties.InstallScriptActions) future, err := applicationsClient.Create(ctx, resourceGroup, name, name, application) if err != nil { return fmt.Errorf("Error creating edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) From a60ef10d3c64dfb42d2c55182efe99bcc45e439c Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Tue, 
20 Aug 2019 23:05:59 -0700 Subject: [PATCH 12/19] RC1 --- azurerm/common_hdinsight.go | 85 +++++++- azurerm/helpers/azure/hdinsight.go | 186 +++++++++--------- .../resource_arm_hdinsight_hadoop_cluster.go | 174 ++++++++++------ ...ource_arm_hdinsight_hadoop_cluster_test.go | 63 +++++- .../r/hdinsight_hadoop_cluster.html.markdown | 18 ++ 5 files changed, 371 insertions(+), 155 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 36cba27930db..733fb659ad14 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -1,13 +1,15 @@ package azurerm import ( + "context" "fmt" - "log" - "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" + "log" + "time" ) func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema.UpdateFunc { @@ -54,6 +56,38 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } } + // The API can add an edge node but can't remove them without force newing the resource. We'll check for adding here + // and can come back to removing if that functionality gets added. https://feedback.azure.com/forums/217335-hdinsight/suggestions/5663773-start-stop-cluster-hdinsight?page=3&per_page=20 + if clusterKind == "Hadoop" { + if d.HasChange("roles.0.edge_node") { + o, n := d.GetChange("roles.0.edge_node.#") + edgeNodeRaw := d.Get("roles.0.edge_node").([]interface{}) + edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) + applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient + + // Create an edge node + if o.(int) < n.(int) { + err := createHDInsightEdgeNode(applicationsClient, ctx, resourceGroup, name, edgeNodeConfig) + if err != nil { + return err + } + + // we can't rely on the use of the Future here due to the node being successfully completed but now the cluster is applying those changes. 
+ log.Printf("[DEBUG] Waiting for Hadoop Cluster to %q (Resource Group %q) to finish applying edge node", name, resourceGroup) + stateConf := &resource.StateChangeConf{ + Pending: []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"}, + Target: []string{"Running"}, + Refresh: hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name), + Timeout: 60 * time.Minute, + MinTimeout: 15 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) + } + } + } + } + return readFunc(d, meta) } } @@ -175,3 +209,50 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi result, } } + +func createHDInsightEdgeNode(client hdinsight.ApplicationsClient, ctx context.Context, resourceGroup string, name string, input map[string]interface{}) error { + installScriptActions := expandHDInsightApplicationEdgeNodeInstallScriptActions(input["install_script_action"].([]interface{})) + + application := hdinsight.Application{ + Properties: &hdinsight.ApplicationProperties{ + ComputeProfile: &hdinsight.ComputeProfile{ + Roles: &[]hdinsight.Role{{ + Name: utils.String("edgenode"), + HardwareProfile: &hdinsight.HardwareProfile{ + VMSize: utils.String(input["vm_size"].(string)), + }, + // The TargetInstanceCount must be one for edge nodes. + TargetInstanceCount: utils.Int32(1), + }}, + }, + InstallScriptActions: installScriptActions, + ApplicationType: utils.String("CustomApplication"), + }, + } + future, err := client.Create(ctx, resourceGroup, name, name, application) + if err != nil { + return fmt.Errorf("Error creating edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for creation of edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + return nil +} + +func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.Get(ctx, resourceGroupName, name) + if err != nil { + return nil, "Error", fmt.Errorf("Error issuing read request in relayNamespaceDeleteRefreshFunc to Relay Namespace %q (Resource Group %q): %s", name, resourceGroupName, err) + } + if props := res.Properties; props != nil { + if state := props.ClusterState; state != nil { + return res, *state, nil + } + } + + return res, "Pending", nil + } +} diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 326019443b07..3b5a35768e4a 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-getter/helper/url" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -212,102 +213,103 @@ type HDInsightNodeDefinition struct { FixedTargetInstanceCount *int32 } -func SchemaHDInsightNodeDefinitionVMSize() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - 
ValidateFunc: validation.StringInSlice([]string{ - // short of deploying every VM Sku for every node type for every HDInsight Cluster - // this is the list I've (@tombuildsstuff) found for valid SKU's from an endpoint in the Portal - // using another SKU causes a bad request from the API - as such this is a best effort UX - "ExtraSmall", - "Small", - "Medium", - "Large", - "ExtraLarge", - "A5", - "A6", - "A7", - "A8", - "A9", - "A10", - "A11", - "Standard_A1_V2", - "Standard_A2_V2", - "Standard_A2m_V2", - "Standard_A3", - "Standard_A4_V2", - "Standard_A4m_V2", - "Standard_A8_V2", - "Standard_A8m_V2", - "Standard_D1", - "Standard_D2", - "Standard_D3", - "Standard_D4", - "Standard_D11", - "Standard_D12", - "Standard_D13", - "Standard_D14", - "Standard_D1_V2", - "Standard_D2_V2", - "Standard_D3_V2", - "Standard_D4_V2", - "Standard_D5_V2", - "Standard_D11_V2", - "Standard_D12_V2", - "Standard_D13_V2", - "Standard_D14_V2", - "Standard_DS1_V2", - "Standard_DS2_V2", - "Standard_DS3_V2", - "Standard_DS4_V2", - "Standard_DS5_V2", - "Standard_DS11_V2", - "Standard_DS12_V2", - "Standard_DS13_V2", - "Standard_DS14_V2", - "Standard_E2_V3", - "Standard_E4_V3", - "Standard_E8_V3", - "Standard_E16_V3", - "Standard_E20_V3", - "Standard_E32_V3", - "Standard_E64_V3", - "Standard_E64i_V3", - "Standard_E2s_V3", - "Standard_E4s_V3", - "Standard_E8s_V3", - "Standard_E16s_V3", - "Standard_E20s_V3", - "Standard_E32s_V3", - "Standard_E64s_V3", - "Standard_E64is_V3", - "Standard_G1", - "Standard_G2", - "Standard_G3", - "Standard_G4", - "Standard_G5", - "Standard_F2s_V2", - "Standard_F4s_V2", - "Standard_F8s_V2", - "Standard_F16s_V2", - "Standard_F32s_V2", - "Standard_F64s_V2", - "Standard_F72s_V2", - "Standard_GS1", - "Standard_GS2", - "Standard_GS3", - "Standard_GS4", - "Standard_GS5", - "Standard_NC24", - }, true), - } +func ValidateSchemaHDInsightNodeDefinitionVMSize() schema.SchemaValidateFunc { + return validation.StringInSlice([]string{ + // short of deploying every VM Sku for every node type for every HDInsight Cluster + // this is the list I've (@tombuildsstuff) found for valid SKU's from an endpoint in the Portal + // using another SKU causes a bad request from the API - as such this is a best effort UX + "ExtraSmall", + "Small", + "Medium", + "Large", + "ExtraLarge", + "A5", + "A6", + "A7", + "A8", + "A9", + "A10", + "A11", + "Standard_A1_V2", + "Standard_A2_V2", + "Standard_A2m_V2", + "Standard_A3", + "Standard_A4_V2", + "Standard_A4m_V2", + "Standard_A8_V2", + "Standard_A8m_V2", + "Standard_D1", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D1_V2", + "Standard_D2_V2", + "Standard_D3_V2", + "Standard_D4_V2", + "Standard_D5_V2", + "Standard_D11_V2", + "Standard_D12_V2", + "Standard_D13_V2", + "Standard_D14_V2", + "Standard_DS1_V2", + "Standard_DS2_V2", + "Standard_DS3_V2", + "Standard_DS4_V2", + "Standard_DS5_V2", + "Standard_DS11_V2", + "Standard_DS12_V2", + "Standard_DS13_V2", + "Standard_DS14_V2", + "Standard_E2_V3", + "Standard_E4_V3", + "Standard_E8_V3", + "Standard_E16_V3", + "Standard_E20_V3", + "Standard_E32_V3", + "Standard_E64_V3", + "Standard_E64i_V3", + "Standard_E2s_V3", + "Standard_E4s_V3", + "Standard_E8s_V3", + "Standard_E16s_V3", + "Standard_E20s_V3", + "Standard_E32s_V3", + "Standard_E64s_V3", + "Standard_E64is_V3", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_F2s_V2", + "Standard_F4s_V2", + "Standard_F8s_V2", + "Standard_F16s_V2", + 
"Standard_F32s_V2", + "Standard_F64s_V2", + "Standard_F72s_V2", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5", + "Standard_NC24", + }, true) } func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition) *schema.Schema { result := map[string]*schema.Schema{ - "vm_size": SchemaHDInsightNodeDefinitionVMSize(), + "vm_size": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: ValidateSchemaHDInsightNodeDefinitionVMSize(), + }, "username": { Type: schema.TypeString, Required: true, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 51661b322ba3..48bce4523db6 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -2,13 +2,17 @@ package azurerm import ( "fmt" - "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform/helper/resource" "log" + "strings" + "time" "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" "github.com/hashicorp/terraform/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) @@ -49,6 +53,37 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { State: schema.ImportStatePassthrough, }, + CustomizeDiff: func(diff *schema.ResourceDiff, v interface{}) error { + // An edge node can be added but can't be update or removed without forcing a new resource to be created + oldEdgeNodeCount, newEdgeNodeCount := diff.GetChange("roles.0.edge_node.#") + oldEdgeNodeInt := oldEdgeNodeCount.(int) + newEdgeNodeInt := newEdgeNodeCount.(int) + + // ForceNew if attempting to remove an edge node + if newEdgeNodeInt < oldEdgeNodeInt { + diff.ForceNew("roles.0.edge_node") + } + + // ForceNew if attempting to update an edge node + if newEdgeNodeInt == 1 && oldEdgeNodeInt == 1 { + // DiffSuppressFunc comes after this check so we need to check if the strings aren't the same sans casing here. 
+ oVMSize, newVMSize := diff.GetChange("roles.0.edge_node.0.vm_size") + if !strings.EqualFold(oVMSize.(string), newVMSize.(string)) { + diff.ForceNew("roles.0.edge_node") + } + + // ForceNew if attempting to update install scripts + oldInstallScriptCount, newInstallScriptCount := diff.GetChange("roles.0.edge_node.0.install_script_action.#") + oldInstallScriptInt := oldInstallScriptCount.(int) + newInstallScriptInt := newInstallScriptCount.(int) + if newInstallScriptInt == oldInstallScriptInt { + diff.ForceNew("roles.0.edge_node.0.install_script_action") + } + } + + return nil + }, + Schema: map[string]*schema.Schema{ "name": azure.SchemaHDInsightName(), @@ -97,40 +132,28 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "vm_size": azure.SchemaHDInsightNodeDefinitionVMSize(), + "vm_size": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: azure.ValidateSchemaHDInsightNodeDefinitionVMSize(), + }, "install_script_action": { Type: schema.TypeList, Required: true, - ForceNew: true, MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, }, "uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // TODO test multiple roles - "roles": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - "edgenode", - "headnode", - "workernode", - "zookeepernode", - }, false), - }, - Set: schema.HashString, + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, }, }, }, @@ -244,37 +267,28 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf d.SetId(*read.ID) - // Add edge node after creation + // We can only add an edge node after creation if v, ok := d.GetOk("roles.0.edge_node"); ok { edgeNodeRaw := v.([]interface{}) applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient - edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) - installScriptActions := expandHDInsightApplicationScriptActions(edgeNodeConfig["install_script_action"].([]interface{})) - - application := hdinsight.Application{ - Properties: &hdinsight.ApplicationProperties{ - ComputeProfile: &hdinsight.ComputeProfile{ - Roles: &[]hdinsight.Role{{ - Name: utils.String("edgenode"), - HardwareProfile: &hdinsight.HardwareProfile{ - VMSize: utils.String(edgeNodeConfig["vm_size"].(string)), - }, - TargetInstanceCount: utils.Int32(1), - }}, - }, - InstallScriptActions: installScriptActions, - ApplicationType: utils.String("CustomApplication"), - }, - } - return fmt.Errorf("Application: %+v\nInstallScriptAddress: %+v\nInstallScript: %+v", application, application.Properties.InstallScriptActions, *application.Properties.InstallScriptActions) - future, err := applicationsClient.Create(ctx, resourceGroup, name, name, application) + + err := createHDInsightEdgeNode(applicationsClient, ctx, resourceGroup, name, edgeNodeConfig) if err != nil { - return fmt.Errorf("Error creating edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return err } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of edge node for HDInsight Hadoop 
Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + // we can't rely on the use of the Future here due to the node being successfully completed but now the cluster is applying those changes. + log.Printf("[DEBUG] Waiting for Hadoop Cluster to %q (Resource Group %q) to finish applying edge node", name, resourceGroup) + stateConf := &resource.StateChangeConf{ + Pending: []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"}, + Target: []string{"Running"}, + Refresh: hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name), + Timeout: 60 * time.Minute, + MinTimeout: 15 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } } @@ -337,6 +351,19 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac ZookeeperNodeDef: hdInsightHadoopClusterZookeeperNodeDefinition, } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, hadoopRoles) + + applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient + edgeNode, err := applicationsClient.Get(ctx, resourceGroup, name, name) + if err != nil { + if !utils.ResponseWasNotFound(edgeNode.Response) { + return fmt.Errorf("Error reading edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + + if edgeNodeProps := edgeNode.Properties; edgeNodeProps != nil { + flattenedRoles = flattenHDInsightEdgeNode(flattenedRoles, edgeNodeProps) + } + if err := d.Set("roles", flattenedRoles); err != nil { return fmt.Errorf("Error flattening `roles`: %+v", err) } @@ -352,6 +379,39 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac return nil } +func flattenHDInsightEdgeNode(roles []interface{}, props *hdinsight.ApplicationProperties) []interface{} { + if len(roles) == 0 || props == nil { + return roles + } + + role := roles[0].(map[string]interface{}) + + edgeNode := make(map[string]interface{}) + if computeProfile := props.ComputeProfile; computeProfile != nil { + if roles := computeProfile.Roles; roles != nil { + for _, role := range *roles { + if hardwareProfile := role.HardwareProfile; hardwareProfile != nil { + edgeNode["vm_size"] = hardwareProfile.VMSize + } + } + } + } + + actions := make(map[string]interface{}) + if installScriptActions := props.InstallScriptActions; installScriptActions != nil { + for _, action := range *installScriptActions { + actions["name"] = action.Name + actions["uri"] = action.URI + } + } + + edgeNode["install_script_action"] = []interface{}{actions} + + role["edge_node"] = []interface{}{edgeNode} + + return []interface{}{role} +} + func expandHDInsightHadoopComponentVersion(input []interface{}) map[string]*string { vs := input[0].(map[string]interface{}) return map[string]*string{ @@ -373,7 +433,7 @@ func flattenHDInsightHadoopComponentVersion(input map[string]*string) []interfac } } -func expandHDInsightApplicationScriptActions(input []interface{}) *[]hdinsight.RuntimeScriptAction { +func expandHDInsightApplicationEdgeNodeInstallScriptActions(input []interface{}) *[]hdinsight.RuntimeScriptAction { actions := make([]hdinsight.RuntimeScriptAction, 0) for _, v := range input { @@ -382,17 +442,11 @@ func expandHDInsightApplicationScriptActions(input []interface{}) *[]hdinsight.R name := val["name"].(string) uri := val["uri"].(string) - rolesRaw := val["roles"].(*schema.Set).List() - roles := make([]string, 0) - for _, v := range rolesRaw 
{ - role := v.(string) - roles = append(roles, role) - } - action := hdinsight.RuntimeScriptAction{ - Name: utils.String(name), - URI: utils.String(uri), - Roles: &roles, + Name: utils.String(name), + URI: utils.String(uri), + // The only role available for edge nodes is edgenode + Roles: &[]string{"edgenode"}, } actions = append(actions, action) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 9a2d3f0f2892..72f1e20de414 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -288,6 +288,67 @@ func TestAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_addEdgeNodeBasic(t *testing.T) { + resourceName := "azurerm_hdinsight_hadoop_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + { + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "roles.0.edge_node.0.password", + "roles.0.edge_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -670,7 +731,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { edge_node { vm_size = "Standard_D3_V2" install_script_action { - name = "script_action_1" + name = "script1" uri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh" } } diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown index c677ace1e5ab..add041fcaf0e 100644 --- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown +++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown @@ -153,6 +153,8 @@ A `roles` block supports the following: * 
`zookeeper_node` - (Required) A `zookeeper_node` block as defined below. +* `edge_node` - (Optional) A `edge_node` block as defined below. + --- A `storage_account` block supports the following: @@ -211,6 +213,22 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `edge_node` block supports the following: + +* `vm_size` - (Required) The Size of the Virtual Machine which should be used as the Edge Nodes. Changing this forces a new resource to be created. + +* `install_script_action` - A `install_script_action` block as defined below. + +--- + +A `install_script_action` block supports the following: + +* `name` - (Required) The name of the install script action. Changing this forces a new resource to be created. + +* `uri` - (Required) The URI pointing to the script to run during the installation of the edge node. Changing this forces a new resource to be created. + ## Attributes Reference The following attributes are exported: From 3abfe6438fd038314aa58542bcfc4f89adef4e27 Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Thu, 22 Aug 2019 07:50:52 -0700 Subject: [PATCH 13/19] Broken tests --- azurerm/common_hdinsight.go | 16 ----- .../resource_arm_hdinsight_hadoop_cluster.go | 61 +++++++++++++++++++ 2 files changed, 61 insertions(+), 16 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 733fb659ad14..1ecf9a56630d 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -240,19 +240,3 @@ func createHDInsightEdgeNode(client hdinsight.ApplicationsClient, ctx context.Co return nil } - -func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.Get(ctx, resourceGroupName, name) - if err != nil { - return nil, "Error", fmt.Errorf("Error issuing read request in relayNamespaceDeleteRefreshFunc to Relay Namespace %q (Resource Group %q): %s", name, resourceGroupName, err) - } - if props := res.Properties; props != nil { - if state := props.ClusterState; state != nil { - return res, *state, nil - } - } - - return res, "Pending", nil - } -} diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 48bce4523db6..a584f38c2097 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -1,6 +1,7 @@ package azurerm import ( + "context" "fmt" "github.com/hashicorp/terraform/helper/resource" "log" @@ -290,6 +291,19 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } + + /* + edgeNodeStateConf := &resource.StateChangeConf{ + Pending: []string{"Empty"}, + Target: []string{"Ready"}, + Refresh: hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx, applicationsClient, resourceGroup, name), + Timeout: 3 * time.Minute, + MinTimeout: 15 * time.Second, + } + + if _, err := edgeNodeStateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for HDInsight Cluster Edge Node %q (Resource Group %q) to be ready: %s", name, resourceGroup, err) + } */ } return 
resourceArmHDInsightHadoopClusterRead(d, meta) @@ -353,6 +367,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, hadoopRoles) applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient + edgeNode, err := applicationsClient.Get(ctx, resourceGroup, name, name) if err != nil { if !utils.ResponseWasNotFound(edgeNode.Response) { @@ -454,3 +469,49 @@ func expandHDInsightApplicationEdgeNodeInstallScriptActions(input []interface{}) return &actions } + +func retryHDInsightEdgeNodeGet(resGroup string, name string, meta interface{}) func() *resource.RetryError { + return func() *resource.RetryError { + client := meta.(*ArmClient).hdinsight.ApplicationsClient + ctx := meta.(*ArmClient).StopContext + + if _, err := client.Get(ctx, resGroup, name, name); err != nil { + return resource.RetryableError(err) + } + + return nil + } +} + +func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.Get(ctx, resourceGroupName, name) + if err != nil { + return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightWaitForReadyRefreshFunc to Hadoop Cluster %q (Resource Group %q): %s", name, resourceGroupName, err) + } + if props := res.Properties; props != nil { + if state := props.ClusterState; state != nil { + return res, *state, nil + } + } + + return res, "Pending", nil + } +} + +func hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ApplicationsClient, resourceGroupName string, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.Get(ctx, resourceGroupName, name, name) + if err != nil { + if res.Response.Response != nil { + return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightEdgeNodeWaitForReadyRefreshFunc to Hadoop Cluster Edge Node %q (Resource Group %q): %s", name, resourceGroupName, err) + } + return res, "Empty", nil + } + if props := res.Properties; props != nil { + return nil, "Ready", nil + } + + return res, "Empty", nil + } +} From 300aca83cbb67e961c37138a444e40d0d94e895f Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Sat, 5 Oct 2019 17:15:49 -0400 Subject: [PATCH 14/19] HDInsight edge node support --- azurerm/common_hdinsight.go | 74 ++++++++++++------- .../resource_arm_hdinsight_hadoop_cluster.go | 44 ++++++----- 2 files changed, 72 insertions(+), 46 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 455965dfecd7..1ca7cd6c5b30 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -37,13 +37,13 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } } - if d.HasChange("roles") { + if d.HasChange("roles.0.worker_node") { log.Printf("[DEBUG] Resizing the HDInsight %q Cluster", clusterKind) rolesRaw := d.Get("roles").([]interface{}) roles := rolesRaw[0].(map[string]interface{}) - headNodes := roles["worker_node"].([]interface{}) - headNode := headNodes[0].(map[string]interface{}) - targetInstanceCount := headNode["target_instance_count"].(int) + workerNodes := roles["worker_node"].([]interface{}) + workerNode := workerNodes[0].(map[string]interface{}) + targetInstanceCount := workerNode["target_instance_count"].(int) params := hdinsight.ClusterResizeParameters{ TargetInstanceCount: 
utils.Int32(int32(targetInstanceCount)), } @@ -62,31 +62,37 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema // and can come back to removing if that functionality gets added. https://feedback.azure.com/forums/217335-hdinsight/suggestions/5663773-start-stop-cluster-hdinsight?page=3&per_page=20 if clusterKind == "Hadoop" { if d.HasChange("roles.0.edge_node") { - o, n := d.GetChange("roles.0.edge_node.#") + log.Printf("[DEBUG] Detected change in edge nodes") + o, n := d.GetChange("roles.0.edge_node.0.target_instance_count") edgeNodeRaw := d.Get("roles.0.edge_node").([]interface{}) edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient - // Create an edge node - if o.(int) < n.(int) { - err := createHDInsightEdgeNode(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) - if err != nil { - return err - } - - // we can't rely on the use of the Future here due to the node being successfully completed but now the cluster is applying those changes. - log.Printf("[DEBUG] Waiting for Hadoop Cluster to %q (Resource Group %q) to finish applying edge node", name, resourceGroup) - stateConf := &resource.StateChangeConf{ - Pending: []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"}, - Target: []string{"Running"}, - Refresh: hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name), - Timeout: 60 * time.Minute, - MinTimeout: 15 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) - } + // Note: API currently doesn't support updating number of edge nodes + // if anything in the edge nodes changes, delete edge nodes then recreate them + err := deleteHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name) + if err != nil { + return err } + + err = createHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) + if err != nil { + return err + } + + // we can't rely on the use of the Future here due to the node being successfully completed but now the cluster is applying those changes. 
+ log.Printf("[DEBUG] Waiting for Hadoop Cluster to %q (Resource Group %q) to finish applying edge node", name, resourceGroup) + stateConf := &resource.StateChangeConf{ + Pending: []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"}, + Target: []string{"Running"}, + Refresh: hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name), + Timeout: 60 * time.Minute, + MinTimeout: 15 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) + } + } } @@ -212,7 +218,7 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi } } -func createHDInsightEdgeNode(ctx context.Context, client *hdinsight.ApplicationsClient, resourceGroup string, name string, input map[string]interface{}) error { +func createHDInsightEdgeNodes(ctx context.Context, client *hdinsight.ApplicationsClient, resourceGroup string, name string, input map[string]interface{}) error { installScriptActions := expandHDInsightApplicationEdgeNodeInstallScriptActions(input["install_script_action"].([]interface{})) application := hdinsight.Application{ @@ -224,7 +230,7 @@ func createHDInsightEdgeNode(ctx context.Context, client *hdinsight.Applications VMSize: utils.String(input["vm_size"].(string)), }, // The TargetInstanceCount must be one for edge nodes. - TargetInstanceCount: utils.Int32(1), + TargetInstanceCount: utils.Int32(int32(input["target_instance_count"].(int))), }}, }, InstallScriptActions: installScriptActions, @@ -233,7 +239,7 @@ func createHDInsightEdgeNode(ctx context.Context, client *hdinsight.Applications } future, err := client.Create(ctx, resourceGroup, name, name, application) if err != nil { - return fmt.Errorf("Error creating edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("Error creating edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { @@ -242,3 +248,17 @@ func createHDInsightEdgeNode(ctx context.Context, client *hdinsight.Applications return nil } + +func deleteHDInsightEdgeNodes(ctx context.Context, client *hdinsight.ApplicationsClient, resourceGroup string, name string) error { + future, err := client.Delete(ctx, resourceGroup, name, name) + + if err != nil { + return fmt.Errorf("Error deleting edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for deletion of edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + return nil +} diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 60a4e34e34d4..7964f6eaab32 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/validation" "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" "github.com/hashicorp/terraform/helper/schema" @@ -59,30 +60,26 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { CustomizeDiff: func(diff *schema.ResourceDiff, v interface{}) error { // An edge node can be added 
but can't be update or removed without forcing a new resource to be created - oldEdgeNodeCount, newEdgeNodeCount := diff.GetChange("roles.0.edge_node.#") + oldEdgeNodeCount, newEdgeNodeCount := diff.GetChange("roles.0.edge_node.0.target_instance_count") oldEdgeNodeInt := oldEdgeNodeCount.(int) newEdgeNodeInt := newEdgeNodeCount.(int) - // ForceNew if attempting to remove an edge node - if newEdgeNodeInt < oldEdgeNodeInt { - diff.ForceNew("roles.0.edge_node") + if oldEdgeNodeInt != newEdgeNodeInt { + diff.ForceNew("roles.0.edge_node.target_instance_count") } - // ForceNew if attempting to update an edge node - if newEdgeNodeInt == 1 && oldEdgeNodeInt == 1 { - // DiffSuppressFunc comes after this check so we need to check if the strings aren't the same sans casing here. - oVMSize, newVMSize := diff.GetChange("roles.0.edge_node.0.vm_size") - if !strings.EqualFold(oVMSize.(string), newVMSize.(string)) { - diff.ForceNew("roles.0.edge_node") - } + // DiffSuppressFunc comes after this check so we need to check if the strings aren't the same sans casing here. + oVMSize, newVMSize := diff.GetChange("roles.0.edge_node.0.vm_size") + if !strings.EqualFold(oVMSize.(string), newVMSize.(string)) { + diff.ForceNew("roles.0.edge_node.0.vm_size") + } - // ForceNew if attempting to update install scripts - oldInstallScriptCount, newInstallScriptCount := diff.GetChange("roles.0.edge_node.0.install_script_action.#") - oldInstallScriptInt := oldInstallScriptCount.(int) - newInstallScriptInt := newInstallScriptCount.(int) - if newInstallScriptInt == oldInstallScriptInt { - diff.ForceNew("roles.0.edge_node.0.install_script_action") - } + // ForceNew if attempting to update install scripts + oldInstallScriptCount, newInstallScriptCount := diff.GetChange("roles.0.edge_node.0.install_script_action.#") + oldInstallScriptInt := oldInstallScriptCount.(int) + newInstallScriptInt := newInstallScriptCount.(int) + if newInstallScriptInt == oldInstallScriptInt { + diff.ForceNew("roles.0.edge_node.0.install_script_action") } return nil @@ -136,6 +133,12 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "target_instance_count": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 25), + }, + "vm_size": { Type: schema.TypeString, Required: true, @@ -277,7 +280,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf applicationsClient := meta.(*ArmClient).hdinsight.ApplicationsClient edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) - err := createHDInsightEdgeNode(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) + err := createHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) if err != nil { return err } @@ -406,6 +409,9 @@ func flattenHDInsightEdgeNode(roles []interface{}, props *hdinsight.ApplicationP if computeProfile := props.ComputeProfile; computeProfile != nil { if roles := computeProfile.Roles; roles != nil { for _, role := range *roles { + if targetInstanceCount := role.TargetInstanceCount; targetInstanceCount != nil { + edgeNode["target_instance_count"] = targetInstanceCount + } if hardwareProfile := role.HardwareProfile; hardwareProfile != nil { edgeNode["vm_size"] = hardwareProfile.VMSize } From 43e2d46d96c4feeaa8840f8f2a06b967d430c624 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Tue, 15 Oct 2019 09:41:59 -0400 Subject: [PATCH 15/19] Address lint errors --- azurerm/common_hdinsight.go | 1 - 
.../resource_arm_hdinsight_hadoop_cluster.go | 52 +++++++++---------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 55da74d076c9..a59b1f08af71 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -93,7 +93,6 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } - } } diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index f89b3e2339f0..236bf57bfd0f 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -488,18 +488,18 @@ func expandHDInsightApplicationEdgeNodeInstallScriptActions(input []interface{}) return &actions } -func retryHDInsightEdgeNodeGet(resGroup string, name string, meta interface{}) func() *resource.RetryError { - return func() *resource.RetryError { - client := meta.(*ArmClient).HDInsight.ApplicationsClient - ctx := meta.(*ArmClient).StopContext +// func retryHDInsightEdgeNodeGet(resGroup string, name string, meta interface{}) func() *resource.RetryError { +// return func() *resource.RetryError { +// client := meta.(*ArmClient).HDInsight.ApplicationsClient +// ctx := meta.(*ArmClient).StopContext - if _, err := client.Get(ctx, resGroup, name, name); err != nil { - return resource.RetryableError(err) - } +// if _, err := client.Get(ctx, resGroup, name, name); err != nil { +// return resource.RetryableError(err) +// } - return nil - } -} +// return nil +// } +// } func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc { return func() (interface{}, string, error) { @@ -517,19 +517,19 @@ func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.Clu } } -func hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ApplicationsClient, resourceGroupName string, name string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.Get(ctx, resourceGroupName, name, name) - if err != nil { - if res.Response.Response != nil { - return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightEdgeNodeWaitForReadyRefreshFunc to Hadoop Cluster Edge Node %q (Resource Group %q): %s", name, resourceGroupName, err) - } - return res, "Empty", nil - } - if props := res.Properties; props != nil { - return nil, "Ready", nil - } - - return res, "Empty", nil - } -} +// func hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ApplicationsClient, resourceGroupName string, name string) resource.StateRefreshFunc { +// return func() (interface{}, string, error) { +// res, err := client.Get(ctx, resourceGroupName, name, name) +// if err != nil { +// if res.Response.Response != nil { +// return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightEdgeNodeWaitForReadyRefreshFunc to Hadoop Cluster Edge Node %q (Resource Group %q): %s", name, resourceGroupName, err) +// } +// return res, "Empty", nil +// } +// if props := res.Properties; props != nil { +// return nil, "Ready", nil +// } + +// return res, "Empty", nil +// } +// } From 228a938422352fc3bc13341fe0b0eafe144b67b2 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Mon, 4 Nov 2019 
16:50:27 -0500 Subject: [PATCH 16/19] Updated to latest master, fixed edge node tests --- azurerm/common_hdinsight.go | 2 +- azurerm/resource_arm_hdinsight_hadoop_cluster_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index a59b1f08af71..088a9bcd8997 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -235,7 +235,7 @@ func createHDInsightEdgeNodes(ctx context.Context, client *hdinsight.Application }}, }, InstallScriptActions: installScriptActions, - ApplicationType: hdinsight.CustomApplication, + ApplicationType: utils.String("CustomApplication"), }, } future, err := client.Create(ctx, resourceGroup, name, name, application) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index b8a9ee9b6fa1..c347c20c82a5 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -691,7 +691,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { resource_group_name = "${azurerm_resource_group.test.name}" location = "${azurerm_resource_group.test.location}" cluster_version = "3.6" - tier = "Premium" + tier = "Standard" component_version { hadoop = "2.7" @@ -730,6 +730,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } edge_node { + target_instance_count = 2 vm_size = "Standard_D3_V2" install_script_action { name = "script1" From e1962d17275130f01785d2dce45a7ccd234eb6d5 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Fri, 8 Nov 2019 15:46:36 -0500 Subject: [PATCH 17/19] Add a test that updates edge nodes --- azurerm/common_hdinsight.go | 1 - ...ource_arm_hdinsight_hadoop_cluster_test.go | 38 +++++++++++++++---- 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 088a9bcd8997..0f873c010c2a 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -230,7 +230,6 @@ func createHDInsightEdgeNodes(ctx context.Context, client *hdinsight.Application HardwareProfile: &hdinsight.HardwareProfile{ VMSize: utils.String(input["vm_size"].(string)), }, - // The TargetInstanceCount must be one for edge nodes. 
TargetInstanceCount: utils.Int32(int32(input["target_instance_count"].(int))), }}, }, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index c347c20c82a5..9d1e1c865a62 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -262,7 +262,7 @@ func TestAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(t *testing.T) { CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), Steps: []resource.TestStep{ { - Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location), + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 2, "Standard_D3_V2"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMHDInsightClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), @@ -323,7 +323,31 @@ func TestAccAzureRMHDInsightHadoopCluster_addEdgeNodeBasic(t *testing.T) { }, }, { - Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location), + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 1, "Standard_D3_V2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "roles.0.edge_node.0.password", + "roles.0.edge_node.0.vm_size", + "storage_account", + }, + }, + { + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 3, "Standard_D4_V2"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMHDInsightClusterExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), @@ -681,8 +705,8 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt, rInt, rInt) } -func testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(rInt int, rString string, location string) string { - template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) +func testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(rInt int, rString string, location string, numEdgeNodes int, instanceType string) string { + template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location, instanceType) return fmt.Sprintf(` %s @@ -730,8 +754,8 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } edge_node { - target_instance_count = 2 - vm_size = "Standard_D3_V2" + target_instance_count = %d + vm_size = "%s" install_script_action { name = "script1" uri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh" @@ -739,7 +763,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } } } -`, template, rInt) +`, template, rInt, rInt, rString) } func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string { From 4ef1c859e6c458d2418fddec3e7af7eacb813e3c Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Sun, 10 Nov 2019 13:57:33 -0500 Subject: [PATCH 18/19] Remove CustomizeDiff logic --- azurerm/common_hdinsight.go | 20 
+++++++++---- .../resource_arm_hdinsight_hadoop_cluster.go | 30 ------------------- ...ource_arm_hdinsight_hadoop_cluster_test.go | 4 +-- 3 files changed, 16 insertions(+), 38 deletions(-) diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go index 0f873c010c2a..f1db69841f44 100644 --- a/azurerm/common_hdinsight.go +++ b/azurerm/common_hdinsight.go @@ -69,16 +69,24 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) applicationsClient := meta.(*ArmClient).HDInsight.ApplicationsClient + oldEdgeNodeCount, newEdgeNodeCount := d.GetChange("roles.0.edge_node.0.target_instance_count") + oldEdgeNodeInt := oldEdgeNodeCount.(int) + newEdgeNodeInt := newEdgeNodeCount.(int) + // Note: API currently doesn't support updating number of edge nodes // if anything in the edge nodes changes, delete edge nodes then recreate them - err := deleteHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name) - if err != nil { - return err + if oldEdgeNodeInt != 0 { + err := deleteHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name) + if err != nil { + return err + } } - err = createHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) - if err != nil { - return err + if newEdgeNodeInt != 0 { + err = createHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig) + if err != nil { + return err + } } // we can't rely on the use of the Future here due to the node being successfully completed but now the cluster is applying those changes. diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 236bf57bfd0f..930e5199f301 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -4,14 +4,11 @@ import ( "context" "fmt" "log" - "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - // "github.com/hashicorp/terraform/helper/resource" - "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" @@ -60,33 +57,6 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, - - CustomizeDiff: func(diff *schema.ResourceDiff, v interface{}) error { - // An edge node can be added but can't be update or removed without forcing a new resource to be created - oldEdgeNodeCount, newEdgeNodeCount := diff.GetChange("roles.0.edge_node.0.target_instance_count") - oldEdgeNodeInt := oldEdgeNodeCount.(int) - newEdgeNodeInt := newEdgeNodeCount.(int) - - if oldEdgeNodeInt != newEdgeNodeInt { - diff.ForceNew("roles.0.edge_node.target_instance_count") - } - - // DiffSuppressFunc comes after this check so we need to check if the strings aren't the same sans casing here. 
- oVMSize, newVMSize := diff.GetChange("roles.0.edge_node.0.vm_size") - if !strings.EqualFold(oVMSize.(string), newVMSize.(string)) { - diff.ForceNew("roles.0.edge_node.0.vm_size") - } - - // ForceNew if attempting to update install scripts - oldInstallScriptCount, newInstallScriptCount := diff.GetChange("roles.0.edge_node.0.install_script_action.#") - oldInstallScriptInt := oldInstallScriptCount.(int) - newInstallScriptInt := newInstallScriptCount.(int) - if newInstallScriptInt == oldInstallScriptInt { - diff.ForceNew("roles.0.edge_node.0.install_script_action") - } - - return nil - }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), Read: schema.DefaultTimeout(5 * time.Minute), diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go index 9d1e1c865a62..d6e4eb416185 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go @@ -706,7 +706,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } func testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(rInt int, rString string, location string, numEdgeNodes int, instanceType string) string { - template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location, instanceType) + template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` %s @@ -763,7 +763,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { } } } -`, template, rInt, rInt, rString) +`, template, rInt, numEdgeNodes, instanceType) } func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string { From 5bf7ce4a2df3e32f1fa671a626a674696c4ce6f4 Mon Sep 17 00:00:00 2001 From: Daniel Intskirveli Date: Mon, 11 Nov 2019 13:31:59 -0500 Subject: [PATCH 19/19] Cleanup imports, remove commented code --- .../resource_arm_hdinsight_hadoop_cluster.go | 48 +------------------ 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 930e5199f301..aa749d3f163f 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -6,11 +6,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" @@ -277,19 +276,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } - - /* - edgeNodeStateConf := &resource.StateChangeConf{ - Pending: []string{"Empty"}, - Target: []string{"Ready"}, - Refresh: hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx, applicationsClient, resourceGroup, name), - Timeout: 3 * time.Minute, - MinTimeout: 15 * time.Second, - } - - if _, err 
:= edgeNodeStateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for HDInsight Cluster Edge Node %q (Resource Group %q) to be ready: %s", name, resourceGroup, err) - } */ } return resourceArmHDInsightHadoopClusterRead(d, meta) @@ -458,19 +444,6 @@ func expandHDInsightApplicationEdgeNodeInstallScriptActions(input []interface{}) return &actions } -// func retryHDInsightEdgeNodeGet(resGroup string, name string, meta interface{}) func() *resource.RetryError { -// return func() *resource.RetryError { -// client := meta.(*ArmClient).HDInsight.ApplicationsClient -// ctx := meta.(*ArmClient).StopContext - -// if _, err := client.Get(ctx, resGroup, name, name); err != nil { -// return resource.RetryableError(err) -// } - -// return nil -// } -// } - func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc { return func() (interface{}, string, error) { res, err := client.Get(ctx, resourceGroupName, name) @@ -486,20 +459,3 @@ func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.Clu return res, "Pending", nil } } - -// func hdInsightEdgeNodeWaitForReadyRefreshFunc(ctx context.Context, client hdinsight.ApplicationsClient, resourceGroupName string, name string) resource.StateRefreshFunc { -// return func() (interface{}, string, error) { -// res, err := client.Get(ctx, resourceGroupName, name, name) -// if err != nil { -// if res.Response.Response != nil { -// return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightEdgeNodeWaitForReadyRefreshFunc to Hadoop Cluster Edge Node %q (Resource Group %q): %s", name, resourceGroupName, err) -// } -// return res, "Empty", nil -// } -// if props := res.Properties; props != nil { -// return nil, "Ready", nil -// } - -// return res, "Empty", nil -// } -// }
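
For reference, a minimal sketch of how the `edge_node` block added in this series is configured inside the `roles` block of an `azurerm_hdinsight_hadoop_cluster` resource. It mirrors the acceptance-test fixture above; the VM size, instance count, and script URI are illustrative values rather than requirements.

resource "azurerm_hdinsight_hadoop_cluster" "example" {
  # ... name, resource_group_name, location, cluster_version, tier,
  # component_version, gateway and storage_account blocks go here ...

  roles {
    # ... the required head_node, worker_node and zookeeper_node blocks ...

    edge_node {
      target_instance_count = 2
      vm_size               = "Standard_D3_V2"

      install_script_action {
        name = "script1"
        uri  = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh"
      }
    }
  }
}

As the final patches implement it, any change to this block is applied during update by deleting and recreating the edge-node application, after which the provider polls the cluster state until it reports Running again.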