diff --git a/vsphere/helper_test.go b/vsphere/helper_test.go index 05b6c5c4d..483e2f3d2 100644 --- a/vsphere/helper_test.go +++ b/vsphere/helper_test.go @@ -871,7 +871,7 @@ func testGetHostFromDataSource(s *terraform.State, resourceName string) (*object } // testGetComputeClusterVMGroup is a convenience method to fetch a virtual -// machine group override in a (compute) cluster. +// machine group in a (compute) cluster. func testGetComputeClusterVMGroup(s *terraform.State, resourceName string) (*types.ClusterVmGroup, error) { vars, err := testClientVariablesForResource(s, fmt.Sprintf("%s.%s", resourceVSphereComputeClusterVMGroupName, resourceName)) if err != nil { @@ -894,3 +894,28 @@ func testGetComputeClusterVMGroup(s *terraform.State, resourceName string) (*typ return resourceVSphereComputeClusterVMGroupFindEntry(cluster, name) } + +// testGetComputeClusterHostGroup is a convenience method to fetch a host group +// in a (compute) cluster. +func testGetComputeClusterHostGroup(s *terraform.State, resourceName string) (*types.ClusterHostGroup, error) { + vars, err := testClientVariablesForResource(s, fmt.Sprintf("%s.%s", resourceVSphereComputeClusterHostGroupName, resourceName)) + if err != nil { + return nil, err + } + + if vars.resourceID == "" { + return nil, errors.New("resource ID is empty") + } + + clusterID, name, err := resourceVSphereComputeClusterHostGroupParseID(vars.resourceID) + if err != nil { + return nil, err + } + + cluster, err := clustercomputeresource.FromID(vars.client, clusterID) + if err != nil { + return nil, err + } + + return resourceVSphereComputeClusterHostGroupFindEntry(cluster, name) +} diff --git a/vsphere/internal/helper/structure/structure_helper.go b/vsphere/internal/helper/structure/structure_helper.go index 4c8ce8666..4e5e57661 100644 --- a/vsphere/internal/helper/structure/structure_helper.go +++ b/vsphere/internal/helper/structure/structure_helper.go @@ -47,7 +47,7 @@ func SliceStringsToInterfaces(s []string) []interface{} { } 
// SliceInterfacesToManagedObjectReferences converts an interface slice into a -// slice of ManagedObjectReferences with the type of t +// slice of ManagedObjectReferences with the type of t. func SliceInterfacesToManagedObjectReferences(s []interface{}, t string) []types.ManagedObjectReference { var d []types.ManagedObjectReference for _, v := range s { @@ -59,6 +59,19 @@ func SliceInterfacesToManagedObjectReferences(s []interface{}, t string) []types return d } +// SliceStringsToManagedObjectReferences converts a string slice into a slice +// of ManagedObjectReferences with the type of t. +func SliceStringsToManagedObjectReferences(s []string, t string) []types.ManagedObjectReference { + var d []types.ManagedObjectReference + for _, v := range s { + d = append(d, types.ManagedObjectReference{ + Type: t, + Value: v, + }) + } + return d +} + // MergeSchema merges the map[string]*schema.Schema from src into dst. Safety // against conflicts is enforced by panicing. func MergeSchema(dst, src map[string]*schema.Schema) { diff --git a/vsphere/provider.go b/vsphere/provider.go index 7a7c7cfa6..49a34b331 100644 --- a/vsphere/provider.go +++ b/vsphere/provider.go @@ -89,6 +89,7 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "vsphere_compute_cluster": resourceVSphereComputeCluster(), + "vsphere_compute_cluster_host_group": resourceVSphereComputeClusterHostGroup(), "vsphere_compute_cluster_vm_group": resourceVSphereComputeClusterVMGroup(), "vsphere_custom_attribute": resourceVSphereCustomAttribute(), "vsphere_datacenter": resourceVSphereDatacenter(), diff --git a/vsphere/resource_vsphere_compute_cluster_host_group.go b/vsphere/resource_vsphere_compute_cluster_host_group.go new file mode 100644 index 000000000..6c46749db --- /dev/null +++ b/vsphere/resource_vsphere_compute_cluster_host_group.go @@ -0,0 +1,375 @@ +package vsphere + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "strings" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/clustercomputeresource" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/structure" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +const resourceVSphereComputeClusterHostGroupName = "vsphere_compute_cluster_host_group" + +func resourceVSphereComputeClusterHostGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceVSphereComputeClusterHostGroupCreate, + Read: resourceVSphereComputeClusterHostGroupRead, + Update: resourceVSphereComputeClusterHostGroupUpdate, + Delete: resourceVSphereComputeClusterHostGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceVSphereComputeClusterHostGroupImport, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The unique name of the host group in the cluster.", + }, + "compute_cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The managed object ID of the cluster.", + }, + "host_system_ids": { + Type: schema.TypeSet, + Optional: true, + Description: "The managed object IDs of the hosts.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceVSphereComputeClusterHostGroupCreate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning create", resourceVSphereComputeClusterHostGroupIDString(d)) + + cluster, name, err := resourceVSphereComputeClusterHostGroupObjects(d, meta) + if err != nil { + return err + } + + info, err := expandClusterHostGroup(d, name) + if err != nil { + return err + } + spec := &types.ClusterConfigSpecEx{ + GroupSpec: []types.ClusterGroupSpec{ + { + ArrayUpdateSpec: 
types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationAdd, + }, + Info: info, + }, + }, + } + + if err = clustercomputeresource.Reconfigure(cluster, spec); err != nil { + return err + } + + id, err := resourceVSphereComputeClusterHostGroupFlattenID(cluster, name) + if err != nil { + return fmt.Errorf("cannot compute ID of created resource: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] %s: Create finished successfully", resourceVSphereComputeClusterHostGroupIDString(d)) + return resourceVSphereComputeClusterHostGroupRead(d, meta) +} + +func resourceVSphereComputeClusterHostGroupRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning read", resourceVSphereComputeClusterHostGroupIDString(d)) + + cluster, name, err := resourceVSphereComputeClusterHostGroupObjects(d, meta) + if err != nil { + return err + } + + info, err := resourceVSphereComputeClusterHostGroupFindEntry(cluster, name) + if err != nil { + return err + } + + if info == nil { + // The configuration is missing, blank out the ID so it can be re-created. + d.SetId("") + return nil + } + + // Save the compute_cluster_id and name here. These are + // ForceNew, but we set these for completeness on import so that if the wrong + // cluster/host group combo was used, it will be noted. + if err = d.Set("compute_cluster_id", cluster.Reference().Value); err != nil { + return fmt.Errorf("error setting attribute \"compute_cluster_id\": %s", err) + } + + // This is the "correct" way to set name here, even if it's a bit + // superfluous. 
+ if err = d.Set("name", info.Name); err != nil { + return fmt.Errorf("error setting attribute \"name\": %s", err) + } + + if err = flattenClusterHostGroup(d, info); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Read completed successfully", resourceVSphereComputeClusterHostGroupIDString(d)) + return nil +} + +func resourceVSphereComputeClusterHostGroupUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning update", resourceVSphereComputeClusterHostGroupIDString(d)) + + cluster, name, err := resourceVSphereComputeClusterHostGroupObjects(d, meta) + if err != nil { + return err + } + + info, err := expandClusterHostGroup(d, name) + if err != nil { + return err + } + spec := &types.ClusterConfigSpecEx{ + GroupSpec: []types.ClusterGroupSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationEdit, + }, + Info: info, + }, + }, + } + + if err := clustercomputeresource.Reconfigure(cluster, spec); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Update finished successfully", resourceVSphereComputeClusterHostGroupIDString(d)) + return resourceVSphereComputeClusterHostGroupRead(d, meta) +} + +func resourceVSphereComputeClusterHostGroupDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning delete", resourceVSphereComputeClusterHostGroupIDString(d)) + + cluster, name, err := resourceVSphereComputeClusterHostGroupObjects(d, meta) + if err != nil { + return err + } + + spec := &types.ClusterConfigSpecEx{ + GroupSpec: []types.ClusterGroupSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationRemove, + RemoveKey: name, + }, + }, + }, + } + + if err := clustercomputeresource.Reconfigure(cluster, spec); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Deleted successfully", resourceVSphereComputeClusterHostGroupIDString(d)) + return nil +} + +func resourceVSphereComputeClusterHostGroupImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + var data map[string]string + if err := json.Unmarshal([]byte(d.Id()), &data); err != nil { + return nil, err + } + clusterPath, ok := data["compute_cluster_path"] + if !ok { + return nil, errors.New("missing compute_cluster_path in input data") + } + name, ok := data["name"] + if !ok { + return nil, errors.New("missing name in input data") + } + + client, err := resourceVSphereComputeClusterHostGroupClient(meta) + if err != nil { + return nil, err + } + + cluster, err := clustercomputeresource.FromPath(client, clusterPath, nil) + if err != nil { + return nil, fmt.Errorf("cannot locate cluster %q: %s", clusterPath, err) + } + + info, err := resourceVSphereComputeClusterHostGroupFindEntry(cluster, name) + if err != nil { + return nil, err + } + + if info == nil { + return nil, fmt.Errorf("cluster group entry %q does not exist in cluster %q", name, cluster.Name()) + } + + id, err := resourceVSphereComputeClusterHostGroupFlattenID(cluster, name) + if err != nil { + return nil, fmt.Errorf("cannot compute ID of imported resource: %s", err) + } + d.SetId(id) + return []*schema.ResourceData{d}, nil +} + +// expandClusterHostGroup reads certain ResourceData keys and returns a +// ClusterHostGroup. +func expandClusterHostGroup(d *schema.ResourceData, name string) (*types.ClusterHostGroup, error) { + obj := &types.ClusterHostGroup{ + ClusterGroupInfo: types.ClusterGroupInfo{ + Name: name, + UserCreated: structure.BoolPtr(true), + }, + Host: structure.SliceInterfacesToManagedObjectReferences(d.Get("host_system_ids").(*schema.Set).List(), "HostSystem"), + } + return obj, nil +} + +// flattenClusterHostGroup saves a ClusterHostGroup into the supplied ResourceData. 
+func flattenClusterHostGroup(d *schema.ResourceData, obj *types.ClusterHostGroup) error { + var hostIDs []string + for _, v := range obj.Host { + hostIDs = append(hostIDs, v.Value) + } + + return structure.SetBatch(d, map[string]interface{}{ + "host_system_ids": hostIDs, + }) +} + +// resourceVSphereComputeClusterHostGroupIDString prints a friendly string for the +// vsphere_cluster_host_group resource. +func resourceVSphereComputeClusterHostGroupIDString(d structure.ResourceIDStringer) string { + return structure.ResourceIDString(d, resourceVSphereComputeClusterHostGroupName) +} + +// resourceVSphereComputeClusterHostGroupFlattenID makes an ID for the +// vsphere_cluster_host_group resource. +func resourceVSphereComputeClusterHostGroupFlattenID(cluster *object.ClusterComputeResource, name string) (string, error) { + clusterID := cluster.Reference().Value + return strings.Join([]string{clusterID, name}, ":"), nil +} + +// resourceVSphereComputeClusterHostGroupParseID parses an ID for the +// vsphere_cluster_host_group and outputs its parts. +func resourceVSphereComputeClusterHostGroupParseID(id string) (string, string, error) { + parts := strings.SplitN(id, ":", 3) + if len(parts) < 2 { + return "", "", fmt.Errorf("bad ID %q", id) + } + return parts[0], parts[1], nil +} + +// resourceVSphereComputeClusterHostGroupFindEntry attempts to locate an +// existing host group in a cluster's configuration. It's used by the +// resource's read functionality and tests. nil is returned if the entry cannot +// be found. 
+func resourceVSphereComputeClusterHostGroupFindEntry( + cluster *object.ClusterComputeResource, + name string, +) (*types.ClusterHostGroup, error) { + props, err := clustercomputeresource.Properties(cluster) + if err != nil { + return nil, fmt.Errorf("error fetching cluster properties: %s", err) + } + + for _, info := range props.ConfigurationEx.(*types.ClusterConfigInfoEx).Group { + if info.GetClusterGroupInfo().Name == name { + if hostInfo, ok := info.(*types.ClusterHostGroup); ok { + log.Printf("[DEBUG] Found host group %q in cluster %q", name, cluster.Name()) + return hostInfo, nil + } + return nil, fmt.Errorf("unique group name %q in cluster %q is not a host group", name, cluster.Name()) + } + } + + log.Printf("[DEBUG] No host group name %q found in cluster %q", name, cluster.Name()) + return nil, nil +} + +// resourceVSphereComputeClusterHostGroupObjects handles the fetching of the +// cluster and group name depending on what attributes are available: +// * If the resource ID is available, the data is derived from the ID. +// * If not, it's derived from the compute_cluster_id and name attributes. 
+func resourceVSphereComputeClusterHostGroupObjects( + d *schema.ResourceData, + meta interface{}, +) (*object.ClusterComputeResource, string, error) { + if d.Id() != "" { + return resourceVSphereComputeClusterHostGroupObjectsFromID(d, meta) + } + return resourceVSphereComputeClusterHostGroupObjectsFromAttributes(d, meta) +} + +func resourceVSphereComputeClusterHostGroupObjectsFromAttributes( + d *schema.ResourceData, + meta interface{}, +) (*object.ClusterComputeResource, string, error) { + return resourceVSphereComputeClusterHostGroupFetchObjects( + meta, + d.Get("compute_cluster_id").(string), + d.Get("name").(string), + ) +} + +func resourceVSphereComputeClusterHostGroupObjectsFromID( + d structure.ResourceIDStringer, + meta interface{}, +) (*object.ClusterComputeResource, string, error) { + // Note that this function uses structure.ResourceIDStringer to satisfy + // interfacer. Adding exceptions in the comments does not seem to work. + // Change this back to ResourceData if it's needed in the future. + clusterID, name, err := resourceVSphereComputeClusterHostGroupParseID(d.Id()) + if err != nil { + return nil, "", err + } + + return resourceVSphereComputeClusterHostGroupFetchObjects(meta, clusterID, name) +} + +// resourceVSphereComputeClusterHostGroupFetchObjects fetches the "objects" for +// a cluster host group. This is currently just the cluster object as the name +// of the group is a static value and a pass-through - this is to keep its +// workflow consistent with other cluster-dependent resources that derive from +// ArrayUpdateSpec that have managed object as keys, such as VM and host +// overrides. 
+func resourceVSphereComputeClusterHostGroupFetchObjects( + meta interface{}, + clusterID string, + name string, +) (*object.ClusterComputeResource, string, error) { + client, err := resourceVSphereComputeClusterHostGroupClient(meta) + if err != nil { + return nil, "", err + } + + cluster, err := clustercomputeresource.FromID(client, clusterID) + if err != nil { + return nil, "", fmt.Errorf("cannot locate cluster: %s", err) + } + + return cluster, name, nil +} + +func resourceVSphereComputeClusterHostGroupClient(meta interface{}) (*govmomi.Client, error) { + client := meta.(*VSphereClient).vimClient + if err := viapi.ValidateVirtualCenter(client); err != nil { + return nil, err + } + return client, nil +} diff --git a/vsphere/resource_vsphere_compute_cluster_host_group_test.go b/vsphere/resource_vsphere_compute_cluster_host_group_test.go new file mode 100644 index 000000000..c38e97cf7 --- /dev/null +++ b/vsphere/resource_vsphere_compute_cluster_host_group_test.go @@ -0,0 +1,280 @@ +package vsphere + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "sort" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/structure" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi" + "github.com/vmware/govmomi/vim25/types" +) + +func TestAccResourceVSphereComputeClusterHostGroup_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereComputeClusterHostGroupPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereComputeClusterHostGroupExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereComputeClusterHostGroupConfig(2), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterHostGroupExists(true), + 
testAccResourceVSphereComputeClusterHostGroupMatchMembership(), + ), + }, + }, + }) +} + +func TestAccResourceVSphereComputeClusterHostGroup_update(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereComputeClusterHostGroupPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereComputeClusterHostGroupExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereComputeClusterHostGroupConfig(2), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterHostGroupExists(true), + testAccResourceVSphereComputeClusterHostGroupMatchMembership(), + ), + }, + { + Config: testAccResourceVSphereComputeClusterHostGroupConfig(3), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterHostGroupExists(true), + testAccResourceVSphereComputeClusterHostGroupMatchMembership(), + ), + }, + }, + }) +} + +func TestAccResourceVSphereComputeClusterHostGroup_import(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereComputeClusterHostGroupPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereComputeClusterHostGroupExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereComputeClusterHostGroupConfig(1), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterHostGroupExists(true), + testAccResourceVSphereComputeClusterHostGroupMatchMembership(), + ), + }, + { + ResourceName: "vsphere_compute_cluster_host_group.cluster_host_group", + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: func(s *terraform.State) (string, error) { + cluster, err := testGetComputeCluster(s, "cluster") + if err != nil { + return "", err + } + + rs, ok := s.RootModule().Resources["vsphere_compute_cluster_host_group.cluster_host_group"] + if !ok { + return "", errors.New("no resource at address 
vsphere_compute_cluster_host_group.cluster_host_group") + } + name, ok := rs.Primary.Attributes["name"] + if !ok { + return "", errors.New("vsphere_compute_cluster_host_group.cluster_host_group has no name attribute") + } + + m := make(map[string]string) + m["compute_cluster_path"] = cluster.InventoryPath + m["name"] = name + b, err := json.Marshal(m) + if err != nil { + return "", err + } + + return string(b), nil + }, + Config: testAccResourceVSphereComputeClusterHostGroupConfig(1), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterHostGroupExists(true), + testAccResourceVSphereComputeClusterHostGroupMatchMembership(), + ), + }, + }, + }) +} + +func testAccResourceVSphereComputeClusterHostGroupPreCheck(t *testing.T) { + if os.Getenv("VSPHERE_DATACENTER") == "" { + t.Skip("set VSPHERE_DATACENTER to run vsphere_compute_cluster_host_group acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST5") == "" { + t.Skip("set VSPHERE_ESXI_HOST5 to run vsphere_compute_cluster_host_group acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST6") == "" { + t.Skip("set VSPHERE_ESXI_HOST6 to run vsphere_compute_cluster_host_group acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST7") == "" { + t.Skip("set VSPHERE_ESXI_HOST7 to run vsphere_compute_cluster_host_group acceptance tests") + } +} + +func testAccResourceVSphereComputeClusterHostGroupExists(expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + info, err := testGetComputeClusterHostGroup(s, "cluster_host_group") + if err != nil { + if expected == false { + if viapi.IsManagedObjectNotFoundError(err) { + // This is not necessarily a missing group, but more than likely a + // missing cluster, which happens during destroy as the dependent + // resources will be missing as well, so want to treat this as a + // deleted override as well. 
+ return nil + } + } + return err + } + + switch { + case info == nil && !expected: + // Expected missing + return nil + case info == nil && expected: + // Expected to exist + return errors.New("cluster host group missing when expected to exist") + case !expected: + return errors.New("cluster host group still present when expected to be missing") + } + + return nil + } +} + +func testAccResourceVSphereComputeClusterHostGroupMatchMembership() resource.TestCheckFunc { + return func(s *terraform.State) error { + actual, err := testGetComputeClusterHostGroup(s, "cluster_host_group") + if err != nil { + return err + } + + if actual == nil { + return errors.New("cluster host group missing") + } + + hosts, err := testAccResourceVSphereComputeClusterHostGroupMatchMembershipHostIDs(s) + if err != nil { + return err + } + + expectedSort := structure.MoRefSorter(hosts) + sort.Sort(expectedSort) + + expected := &types.ClusterHostGroup{ + ClusterGroupInfo: types.ClusterGroupInfo{ + Name: actual.Name, + UserCreated: actual.UserCreated, + }, + Host: []types.ManagedObjectReference(expectedSort), + } + + actualSort := structure.MoRefSorter(actual.Host) + sort.Sort(actualSort) + actual.Host = []types.ManagedObjectReference(actualSort) + + if !reflect.DeepEqual(expected, actual) { + return spew.Errorf("expected %#v got %#v", expected, actual) + } + + return nil + } +} + +func testAccResourceVSphereComputeClusterHostGroupMatchMembershipHostIDs(s *terraform.State) ([]types.ManagedObjectReference, error) { + var ids []string + if rs, ok := s.RootModule().Resources["data.vsphere_host.hosts"]; ok { + ids = []string{rs.Primary.ID} + } else { + ids = testAccResourceVSphereComputeClusterHostGroupGetMultiple(s) + } + + return structure.SliceStringsToManagedObjectReferences(ids, "HostSystem"), nil +} + +func testAccResourceVSphereComputeClusterHostGroupGetMultiple(s *terraform.State) []string { + var i int + var ids []string + for { + rs, ok := 
s.RootModule().Resources[fmt.Sprintf("data.vsphere_host.hosts.%d", i)] + if !ok { + break + } + ids = append(ids, rs.Primary.ID) + i++ + } + return ids +} + +func testAccResourceVSphereComputeClusterHostGroupConfig(count int) string { + return fmt.Sprintf(` +variable "datacenter" { + default = "%s" +} + +variable "hosts" { + default = [ + "%s", + "%s", + "%s", + ] +} + +variable "host_count" { + default = "%d" +} + +data "vsphere_datacenter" "dc" { + name = "${var.datacenter}" +} + +data "vsphere_host" "hosts" { + count = "${var.host_count}" + name = "${var.hosts[count.index]}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +resource "vsphere_compute_cluster" "cluster" { + name = "terraform-compute-cluster-test" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + host_system_ids = ["${data.vsphere_host.hosts.*.id}"] + + force_evacuate_on_destroy = true +} + +resource "vsphere_compute_cluster_host_group" "cluster_host_group" { + name = "terraform-test-cluster-group" + compute_cluster_id = "${vsphere_compute_cluster.cluster.id}" + host_system_ids = ["${data.vsphere_host.hosts.*.id}"] +} +`, + os.Getenv("VSPHERE_DATACENTER"), + os.Getenv("VSPHERE_ESXI_HOST5"), + os.Getenv("VSPHERE_ESXI_HOST6"), + os.Getenv("VSPHERE_ESXI_HOST7"), + count, + ) +} diff --git a/vsphere/resource_vsphere_compute_cluster_vm_group.go b/vsphere/resource_vsphere_compute_cluster_vm_group.go index c0d14bfa9..c5b2cf262 100644 --- a/vsphere/resource_vsphere_compute_cluster_vm_group.go +++ b/vsphere/resource_vsphere_compute_cluster_vm_group.go @@ -297,9 +297,9 @@ func resourceVSphereComputeClusterVMGroupParseID(id string) (string, string, err return parts[0], parts[1], nil } -// resourceVSphereComputeClusterVMGroupFindEntry attempts to locate an existing DRS VM -// config in a cluster's configuration. It's used by the resource's read -// functionality and tests. nil is returned if the entry cannot be found. 
+// resourceVSphereComputeClusterVMGroupFindEntry attempts to locate an existing +// VM group config in a cluster's configuration. It's used by the resource's +// read functionality and tests. nil is returned if the entry cannot be found. func resourceVSphereComputeClusterVMGroupFindEntry( cluster *object.ClusterComputeResource, name string, diff --git a/website/docs/r/compute_cluster.html.markdown b/website/docs/r/compute_cluster.html.markdown index 313010d0d..0e5ada3df 100644 --- a/website/docs/r/compute_cluster.html.markdown +++ b/website/docs/r/compute_cluster.html.markdown @@ -120,7 +120,7 @@ and require vCenter. The following settings control cluster membership or tune how hosts are managed within the cluster itself by Terraform. -* `host_system_ids` - (Optional) The [managed object ID][docs-about-morefs] of +* `host_system_ids` - (Optional) The [managed object IDs][docs-about-morefs] of the hosts to put in the cluster. + `host_cluster_exit_timeout` - The timeout for each host maintenance mode operation when removing hosts from a cluster. The value is specified in diff --git a/website/docs/r/compute_cluster_host_group.html.markdown b/website/docs/r/compute_cluster_host_group.html.markdown new file mode 100644 index 000000000..c3b8d2724 --- /dev/null +++ b/website/docs/r/compute_cluster_host_group.html.markdown @@ -0,0 +1,118 @@ +--- +layout: "vsphere" +page_title: "VMware vSphere: vsphere_compute_cluster_host_group" +sidebar_current: "docs-vsphere-resource-compute-cluster-host-group" +description: |- + Provides a VMware vSphere cluster host group. This can be used to manage groups of hosts for relevant rules in a cluster. 
+--- + +# vsphere\_compute\_cluster\_host\_group + +The `vsphere_compute_cluster_host_group` resource can be used to manage groups +of hosts in a cluster, either created by the +[`vsphere_compute_cluster`][tf-vsphere-cluster-resource] resource or looked up +by the [`vsphere_compute_cluster`][tf-vsphere-cluster-data-source] data source. + +[tf-vsphere-cluster-resource]: /docs/providers/vsphere/r/compute_cluster.html +[tf-vsphere-cluster-data-source]: /docs/providers/vsphere/d/compute_cluster.html + +This resource mainly serves as an input to the +[`vsphere_compute_cluster_vm_host_rule`][tf-vsphere-cluster-vm-host-rule-resource] +resource - see the documentation for that resource for further details on how +to use host groups. + +[tf-vsphere-cluster-vm-host-rule-resource]: /docs/providers/vsphere/r/compute_cluster_vm_host_rule.html + +## Example Usage + +The example below is the exact same configuration as the +[example][tf-vsphere-cluster-resource-example] in the +[`vsphere_compute_cluster`][tf-vsphere-cluster-resource] resource, but in +addition, it creates a host group with the same hosts that get put into the +cluster. 
+ +[tf-vsphere-cluster-resource-example]: /docs/providers/vsphere/r/compute_cluster.html#example-usage + +```hcl +variable "datacenter" { + default = "dc1" +} + +variable "hosts" { + default = [ + "esxi1", + "esxi2", + "esxi3", + ] +} + +data "vsphere_datacenter" "dc" { + name = "${var.datacenter}" +} + +data "vsphere_host" "hosts" { + count = "${length(var.hosts)}" + name = "${var.hosts[count.index]}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +resource "vsphere_compute_cluster" "compute_cluster" { + name = "terraform-compute-cluster-test" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + host_system_ids = ["${data.vsphere_host.hosts.*.id}"] + + drs_enabled = true + drs_automation_level = "fullyAutomated" + + ha_enabled = true +} + +resource "vsphere_compute_cluster_host_group" "cluster_host_group" { + name = "terraform-test-cluster-host-group" + compute_cluster_id = "${vsphere_compute_cluster.compute_cluster.id}" + host_system_ids = ["${data.vsphere_host.hosts.*.id}"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the host group. This must be unique in the + cluster. Forces a new resource if changed. +* `compute_cluster_id` - (Required) The [managed object reference + ID][docs-about-morefs] of the cluster to put the group in. Forces a new + resource if changed. + +[docs-about-morefs]: /docs/providers/vsphere/index.html#use-of-managed-object-references-by-the-vsphere-provider + +* `host_system_ids` - (Optional) The [managed object IDs][docs-about-morefs] of + the hosts to put in the group. + +~> **NOTE:** The namespace for cluster names on this resource (defined by the +[`name`](#name) argument) is shared with the +[`vsphere_compute_cluster_vm_group`][tf-vsphere-cluster-vm-group-resource] +resource. Make sure your names are unique across both resources. 
+ +[tf-vsphere-cluster-vm-group-resource]: /docs/providers/vsphere/r/compute_cluster_vm_group.html + +## Attribute Reference + +The only attribute this resource exports is the `id` of the resource, which is +a combination of the [managed object reference ID][docs-about-morefs] of the +cluster, and the name of the host group. + +## Importing + +An existing group can be [imported][docs-import] into this resource by +supplying both the path to the cluster, and the name of the host group. If the +name or cluster is not found, or if the group is of a different type, an error +will be returned. An example is below: + +[docs-import]: https://www.terraform.io/docs/import/index.html + +``` +terraform import vsphere_compute_cluster_host_group.cluster_host_group \ + '{"compute_cluster_path": "/dc1/host/cluster1", \ + "name": "terraform-test-cluster-host-group"}' +``` diff --git a/website/docs/r/compute_cluster_vm_group.html.markdown b/website/docs/r/compute_cluster_vm_group.html.markdown index 245172beb..2187ca17d 100644 --- a/website/docs/r/compute_cluster_vm_group.html.markdown +++ b/website/docs/r/compute_cluster_vm_group.html.markdown @@ -102,6 +102,13 @@ The following arguments are supported: * `virtual_machine_ids` - (Required) The UUIDs of the virtual machines in this group. +~> **NOTE:** The namespace for cluster names on this resource (defined by the +[`name`](#name) argument) is shared with the +[`vsphere_compute_cluster_host_group`][tf-vsphere-cluster-host-group-resource] +resource. Make sure your names are unique across both resources. 
+ +[tf-vsphere-cluster-host-group-resource]: /docs/providers/vsphere/r/compute_cluster_host_group.html + ## Attribute Reference The only attribute this resource exports is the `id` of the resource, which is diff --git a/website/vsphere.erb b/website/vsphere.erb index 1249b07eb..1a73e1a16 100644 --- a/website/vsphere.erb +++ b/website/vsphere.erb @@ -70,6 +70,9 @@ > vsphere_compute_cluster + > + vsphere_compute_cluster_host_group + > vsphere_compute_cluster_vm_group