diff --git a/.secrets.baseline b/.secrets.baseline index 6e106327ea..e87f277185 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "go.sum|^.secrets.baseline$", "lines": null }, - "generated_at": "2022-03-16T19:18:39Z", + "generated_at": "2022-03-23T07:53:32Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -632,7 +632,7 @@ "hashed_secret": "9184b0c38101bf24d78b2bb0d044deb1d33696fc", "is_secret": false, "is_verified": false, - "line_number": 120, + "line_number": 121, "type": "Secret Keyword", "verified_result": null }, @@ -640,7 +640,7 @@ "hashed_secret": "c427f185ddcb2440be9b77c8e45f1cd487a2e790", "is_secret": false, "is_verified": false, - "line_number": 1308, + "line_number": 1323, "type": "Base64 High Entropy String", "verified_result": null }, @@ -648,7 +648,7 @@ "hashed_secret": "1f7e33de15e22de9d2eaf502df284ed25ca40018", "is_secret": false, "is_verified": false, - "line_number": 1376, + "line_number": 1391, "type": "Secret Keyword", "verified_result": null }, @@ -656,7 +656,7 @@ "hashed_secret": "1f614c2eb6b3da22d89bd1b9fd47d7cb7c8fc670", "is_secret": false, "is_verified": false, - "line_number": 2922, + "line_number": 2958, "type": "Secret Keyword", "verified_result": null }, @@ -664,7 +664,7 @@ "hashed_secret": "7abfce65b8504403afc25c9790f358d513dfbcc6", "is_secret": false, "is_verified": false, - "line_number": 2935, + "line_number": 2971, "type": "Secret Keyword", "verified_result": null }, @@ -672,7 +672,7 @@ "hashed_secret": "0c2d85bf9a9b1579b16f220a4ea8c3d62b2e24b1", "is_secret": false, "is_verified": false, - "line_number": 2976, + "line_number": 3012, "type": "Secret Keyword", "verified_result": null } @@ -682,7 +682,7 @@ "hashed_secret": "da8cae6284528565678de15e03d461e23fe22538", "is_secret": false, "is_verified": false, - "line_number": 1507, + "line_number": 1513, "type": "Secret Keyword", "verified_result": null } @@ -692,7 +692,7 @@ "hashed_secret": "c8b6f5ef11b9223ac35a5663975a466ebe7ebba9", "is_secret": false, 
"is_verified": false, - "line_number": 1178, + "line_number": 1187, "type": "Secret Keyword", "verified_result": null }, @@ -700,7 +700,7 @@ "hashed_secret": "8abf4899c01104241510ba87685ad4de76b0c437", "is_secret": false, "is_verified": false, - "line_number": 1184, + "line_number": 1193, "type": "Secret Keyword", "verified_result": null } @@ -1063,6 +1063,16 @@ "verified_result": null } ], + "ibm/service/cis/resource_ibm_cis_logpush_job_test.go": [ + { + "hashed_secret": "c541b3639ea7f56f2e547a752f000be347779048", + "is_secret": false, + "is_verified": false, + "line_number": 51, + "type": "Hex High Entropy String", + "verified_result": null + } + ], "ibm/service/cis/resource_ibm_cis_waf_group_test.go": [ { "hashed_secret": "ece6e4a51cf5a18845f07c95832586a96d5fcf4c", diff --git a/ibm/service/database/resource_ibm_database.go b/ibm/service/database/resource_ibm_database.go index a7910d94b3..5844df50c6 100644 --- a/ibm/service/database/resource_ibm_database.go +++ b/ibm/service/database/resource_ibm_database.go @@ -11,10 +11,14 @@ import ( "net/url" "os" "reflect" + "regexp" + "sort" "strings" "time" rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -26,6 +30,8 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/IBM/cloud-databases-go-sdk/clouddatabasesv5" + "github.com/IBM/go-sdk-core/v5/core" ) const ( @@ -85,13 +91,17 @@ func retryTask(f func() (icdv4.Task, error)) (task icdv4.Task, err error) { func ResourceIBMDatabaseInstance() *schema.Resource { return &schema.Resource{ - Create: 
resourceIBMDatabaseInstanceCreate, - Read: resourceIBMDatabaseInstanceRead, - Update: resourceIBMDatabaseInstanceUpdate, - Delete: resourceIBMDatabaseInstanceDelete, + CreateContext: resourceIBMDatabaseInstanceCreate, + ReadContext: resourceIBMDatabaseInstanceRead, + UpdateContext: resourceIBMDatabaseInstanceUpdate, + DeleteContext: resourceIBMDatabaseInstanceDelete, Exists: resourceIBMDatabaseInstanceExists, - CustomizeDiff: resourceIBMDatabaseInstanceDiff, - Importer: &schema.ResourceImporter{}, + + CustomizeDiff: customdiff.All( + resourceIBMDatabaseInstanceDiff, + checkV5Groups), + + Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -124,6 +134,7 @@ func ResourceIBMDatabaseInstance() *schema.Resource { Description: "The name of the Cloud Internet database service", Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validate.ValidateAllowedStringValues([]string{"databases-for-etcd", "databases-for-postgresql", "databases-for-redis", "databases-for-elasticsearch", "databases-for-mongodb", "messages-for-rabbitmq", "databases-for-mysql", "databases-for-cassandra", "databases-for-enterprisedb"}), }, "plan": { @@ -190,28 +201,28 @@ func ResourceIBMDatabaseInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count", "group"}, }, "members_disk_allocation_mb": { Description: "Disk allocation required for cluster", Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", 
"node_cpu_allocation_count", "group"}, }, "members_cpu_allocation_count": { Description: "CPU allocation required for cluster", Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count"}, + ConflictsWith: []string{"node_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count", "group"}, }, "node_count": { Description: "Total number of nodes in the cluster", Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count", "group"}, }, "node_memory_allocation_mb": { Description: "Memory allocation per node", @@ -219,21 +230,21 @@ func ResourceIBMDatabaseInstance() *schema.Resource { Optional: true, Computed: true, - ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count", "group"}, }, "node_disk_allocation_mb": { Description: "Disk allocation per node", Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count", "group"}, }, "node_cpu_allocation_count": { Description: "CPU allocation per node", Type: schema.TypeInt, Optional: true, Computed: true, - ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count"}, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", 
"members_cpu_allocation_count", "group"}, }, "plan_validation": { Description: "For elasticsearch and postgres perform database parameter validation during the plan phase. Otherwise, database parameter validation happens in apply phase.", @@ -424,6 +435,71 @@ func ResourceIBMDatabaseInstance() *schema.Resource { }, }, }, + "group": { + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"members_memory_allocation_mb", "members_disk_allocation_mb", "members_cpu_allocation_count", "node_memory_allocation_mb", "node_disk_allocation_mb", "node_cpu_allocation_count", "node_count"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_id": { + Required: true, + Type: schema.TypeString, + }, + "members": { + Optional: true, + Type: schema.TypeSet, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allocation_count": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "memory": { + Optional: true, + Type: schema.TypeSet, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allocation_mb": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "disk": { + Optional: true, + Type: schema.TypeSet, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allocation_mb": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "cpu": { + Optional: true, + Type: schema.TypeSet, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allocation_count": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + }, + }, + }, "groups": { Type: schema.TypeList, Computed: true, @@ -786,6 +862,53 @@ type Params struct { PITRTimeStamp string `json:"point_in_time_recovery_time,omitempty"` } +type Group struct { + ID string + Members *GroupResource + Memory *GroupResource + Disk *GroupResource + CPU *GroupResource +} + +type GroupResource struct { + Units string + Allocation int + Minimum int + Maximum int + 
StepSize int + IsAdjustable bool + CanScaleDown bool +} + +func getDefaultScalingGroups(_service string, meta interface{}) (groups []clouddatabasesv5.Group, err error) { + cloudDatabasesClient, err := meta.(conns.ClientSession).CloudDatabasesV5() + if err != nil { + return groups, fmt.Errorf("[ERROR] Error getting database client settings: %s", err) + } + + re := regexp.MustCompile("(?:messages|databases)-for-([a-z]+)") + match := re.FindStringSubmatch(_service) + + if match == nil { + return groups, fmt.Errorf("[ERROR] Error invalid service name: %s", _service) + } + + service := match[1] + + if service == "cassandra" { + service = "datastax_enterprise_full" + } + + getDefaultScalingGroupsOptions := cloudDatabasesClient.NewGetDefaultScalingGroupsOptions(service) + + getDefaultScalingGroupsResponse, _, err := cloudDatabasesClient.GetDefaultScalingGroups(getDefaultScalingGroupsOptions) + if err != nil { + return groups, err + } + + return getDefaultScalingGroupsResponse.Groups, nil +} + func getDatabaseServiceDefaults(service string, meta interface{}) (*icdv4.Group, error) { icdClient, err := meta.(conns.ClientSession).ICDAPI() if err != nil { @@ -808,36 +931,60 @@ func getDatabaseServiceDefaults(service string, meta interface{}) (*icdv4.Group, return &groupDefaults.Groups[0], nil } -func getInitialNodeCount(d *schema.ResourceData, meta interface{}) (int, error) { - service := d.Get("service").(string) - planPhase := d.Get("plan_validation").(bool) - if planPhase { - groupDefaults, err := getDatabaseServiceDefaults(service, meta) - if err != nil { - return 0, err - } - return groupDefaults.Members.MinimumCount, nil - } else { - if service == "databases-for-elasticsearch" { - return 3, nil - } else if service == "databases-for-cassandra" { - return 3, nil +func getInitialNodeCount(service string, meta interface{}) (int, error) { + groups, err := getDefaultScalingGroups(service, meta) + + if err != nil { + return 0, err + } + + for _, g := range groups { + if *g.ID == 
"member" { + return int(*g.Members.MinimumCount), nil } - return 2, nil } + + return 0, fmt.Errorf("getInitialNodeCount failed for member group") } -type GroupLimit struct { - Units string - Allocation int - Minimum int - Maximum int - StepSize int - IsAdjustable bool - CanScaleDown bool +func getGroups(instanceID string, meta interface{}) (groups []clouddatabasesv5.Group, err error) { + cloudDatabasesClient, err := meta.(conns.ClientSession).CloudDatabasesV5() + if err != nil { + return nil, err + } + + listDeploymentScalingGroupsOptions := &clouddatabasesv5.ListDeploymentScalingGroupsOptions{ + ID: &instanceID, + } + + groupsResponse, _, err := cloudDatabasesClient.ListDeploymentScalingGroups(listDeploymentScalingGroupsOptions) + if err != nil { + return groups, err + } + + return groupsResponse.Groups, nil +} + +func checkGroupScaling(groupId string, resourceName string, value int, resource *GroupResource, nodeCount int) error { + if nodeCount == 0 { + nodeCount = 1 + } + if resource.StepSize == 0 { + return fmt.Errorf("%s group must have members scaled > 0 before scaling %s", groupId, resourceName) + } + if value < resource.Minimum/nodeCount || value > resource.Maximum/nodeCount || value%(resource.StepSize/nodeCount) != 0 { + return fmt.Errorf("%s group %s must be >= %d and <= %d in increments of %d", groupId, resourceName, resource.Minimum/nodeCount, resource.Maximum/nodeCount, resource.StepSize/nodeCount) + } + if value != resource.Allocation/nodeCount && !resource.IsAdjustable { + return fmt.Errorf("%s can not change %s value after create", groupId, resourceName) + } + if value < resource.Allocation/nodeCount && !resource.CanScaleDown { + return fmt.Errorf("can not scale %s group %s below %d to %d", groupId, resourceName, resource.Allocation/nodeCount, value) + } + return nil } -func checkGroupValue(name string, limits GroupLimit, divider int, diff *schema.ResourceDiff) error { +func checkGroupValue(name string, limits GroupResource, divider int, diff 
*schema.ResourceDiff) error { if diff.HasChange(name) { oldSetting, newSetting := diff.GetChange(name) old := oldSetting.(int) @@ -868,7 +1015,7 @@ type CountLimit struct { } func checkCountValue(name string, limits CountLimit, divider int, diff *schema.ResourceDiff) error { - groupLimit := GroupLimit{ + groupLimit := GroupResource{ Units: limits.Units, Allocation: limits.AllocationCount, Minimum: limits.MinimumCount, @@ -891,7 +1038,7 @@ type MbLimit struct { } func checkMbValue(name string, limits MbLimit, divider int, diff *schema.ResourceDiff) error { - groupLimit := GroupLimit{ + groupLimit := GroupResource{ Units: limits.Units, Allocation: limits.AllocationMb, Minimum: limits.MinimumMb, @@ -903,19 +1050,20 @@ func checkMbValue(name string, limits MbLimit, divider int, diff *schema.Resourc return checkGroupValue(name, groupLimit, divider, diff) } -func resourceIBMDatabaseInstanceDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - - err := flex.ResourceTagsCustomizeDiff(diff) +func resourceIBMDatabaseInstanceDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) (err error) { + err = flex.ResourceTagsCustomizeDiff(diff) if err != nil { return err } service := diff.Get("service").(string) - if service == "databases-for-postgresql" || service == "databases-for-elasticsearch" || service == "databases-for-cassandra" || service == "databases-for-enterprisedb" { - planPhase := diff.Get("plan_validation").(bool) + planPhase := diff.Get("plan_validation").(bool) + if service == "databases-for-postgresql" || + service == "databases-for-elasticsearch" || + service == "databases-for-cassandra" || + service == "databases-for-enterprisedb" { if planPhase { - groupDefaults, err := getDatabaseServiceDefaults(service, meta) if err != nil { return err @@ -975,10 +1123,10 @@ func resourceIBMDatabaseInstanceDiff(_ context.Context, diff *schema.ResourceDif } // Replace with func wrapper for resourceIBMResourceInstanceCreate specifying 
serviceName := "database......." -func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { +func resourceIBMDatabaseInstanceCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { rsConClient, err := meta.(conns.ClientSession).ResourceControllerV2API() if err != nil { - return err + return diag.FromErr(err) } serviceName := d.Get("service").(string) @@ -992,27 +1140,27 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) rsCatClient, err := meta.(conns.ClientSession).ResourceCatalogAPI() if err != nil { - return err + return diag.FromErr(err) } rsCatRepo := rsCatClient.ResourceCatalog() serviceOff, err := rsCatRepo.FindByName(serviceName, true) if err != nil { - return fmt.Errorf("[ERROR] Error retrieving database service offering: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving database service offering: %s", err)) } servicePlan, err := rsCatRepo.GetServicePlanID(serviceOff[0], plan) if err != nil { - return fmt.Errorf("[ERROR] Error retrieving plan: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving plan: %s", err)) } rsInst.ResourcePlanID = &servicePlan deployments, err := rsCatRepo.ListDeployments(servicePlan) if err != nil { - return fmt.Errorf("[ERROR] Error retrieving deployment for plan %s : %s", plan, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving deployment for plan %s : %s", plan, err)) } if len(deployments) == 0 { - return fmt.Errorf("[ERROR] No deployment found for service plan : %s", plan) + return diag.FromErr(fmt.Errorf("[ERROR] No deployment found for service plan : %s", plan)) } deployments, supportedLocations := filterDatabaseDeployments(deployments, location) @@ -1021,7 +1169,7 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) for l := range supportedLocations { locationList = append(locationList, l) } - return fmt.Errorf("[ERROR] No deployment found for 
service plan %s at location %s.\nValid location(s) are: %q", plan, location, locationList) + return diag.FromErr(fmt.Errorf("[ERROR] No deployment found for service plan %s at location %s.\nValid location(s) are: %q", plan, location, locationList)) } catalogCRN := deployments[0].CatalogCRN rsInst.Target = &catalogCRN @@ -1032,17 +1180,41 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } else { defaultRg, err := flex.DefaultResourceGroup(meta) if err != nil { - return err + return diag.FromErr(err) } rsInst.ResourceGroup = &defaultRg } - initialNodeCount, err := getInitialNodeCount(d, meta) + initialNodeCount, err := getInitialNodeCount(serviceName, meta) if err != nil { - return err + return diag.FromErr(err) } params := Params{} + if group, ok := d.GetOk("group"); ok { + groups := expandGroups(group.(*schema.Set).List()) + var memberGroup *Group + for _, g := range groups { + if g.ID == "member" { + memberGroup = g + break + } + } + + if memberGroup != nil { + if memberGroup.Memory != nil { + params.Memory = memberGroup.Memory.Allocation * initialNodeCount + } + + if memberGroup.Disk != nil { + params.Disk = memberGroup.Disk.Allocation * initialNodeCount + } + + if memberGroup.CPU != nil { + params.CPU = memberGroup.CPU.Allocation * initialNodeCount + } + } + } if memory, ok := d.GetOk("members_memory_allocation_mb"); ok { params.Memory = memory.(int) } @@ -1095,29 +1267,81 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance, response, err := rsConClient.CreateResourceInstance(&rsInst) if err != nil { - return fmt.Errorf("[ERROR] Error creating database instance: %s %s", err, response) + return diag.FromErr( + fmt.Errorf("[ERROR] Error creating database instance: %s %s", err, response)) } d.SetId(*instance.ID) _, err = waitForDatabaseInstanceCreate(d, meta, *instance.ID) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for create database instance (%s) to complete: %s", 
*instance.ID, err) + return diag.FromErr( + fmt.Errorf( + "[ERROR] Error waiting for create database instance (%s) to complete: %s", *instance.ID, err)) + } + + cloudDatabasesClient, err := meta.(conns.ClientSession).CloudDatabasesV5() + if err != nil { + return diag.FromErr(err) } if node_count, ok := d.GetOk("node_count"); ok { if initialNodeCount != node_count { icdClient, err := meta.(conns.ClientSession).ICDAPI() if err != nil { - return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database client settings: %s", err)) } err = horizontalScale(d, meta, icdClient) if err != nil { - return err + return diag.FromErr(err) + } + } + } + + if group, ok := d.GetOk("group"); ok { + groups := expandGroups(group.(*schema.Set).List()) + for _, g := range groups { + groupScaling := &clouddatabasesv5.GroupScaling{} + nodeCount := initialNodeCount + + if (g.ID == "member") && + (g.Members != nil) && + (nodeCount == g.Members.Allocation) { + // No Horizontal Scaling needed + continue + } + if g.Members != nil { + groupScaling.Members = &clouddatabasesv5.GroupScalingMembers{AllocationCount: core.Int64Ptr(int64(g.Members.Allocation))} + nodeCount = g.Members.Allocation + } + if g.Memory != nil { + groupScaling.Memory = &clouddatabasesv5.GroupScalingMemory{AllocationMb: core.Int64Ptr(int64(g.Memory.Allocation * nodeCount))} + } + if g.Disk != nil { + groupScaling.Disk = &clouddatabasesv5.GroupScalingDisk{AllocationMb: core.Int64Ptr(int64(g.Disk.Allocation * nodeCount))} + } + if g.CPU != nil { + groupScaling.CPU = &clouddatabasesv5.GroupScalingCPU{AllocationCount: core.Int64Ptr(int64(g.CPU.Allocation * nodeCount))} + } + + setDeploymentScalingGroupOptions := &clouddatabasesv5.SetDeploymentScalingGroupOptions{ + ID: instance.ID, + GroupID: &g.ID, + Group: groupScaling, + } + + setDeploymentScalingGroupResponse, _, err := cloudDatabasesClient.SetDeploymentScalingGroup(setDeploymentScalingGroupOptions) 
+ if err != nil { return diag.FromErr(err) } + taskIDLink := *setDeploymentScalingGroupResponse.Task.ID + + _, err = waitForDatabaseTaskComplete(taskIDLink, d, meta, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return diag.FromErr(err) + } } } + v := os.Getenv("IC_ENV_TAGS") if _, ok := d.GetOk("tags"); ok || v != "" { oldList, newList := d.GetChange("tags") err = flex.UpdateTagsUsingCRN(oldList, newList, meta, *instance.CRN) if err != nil { log.Printf( "Error on create of ibm database (%s) tags: %s", d.Id(), err) } } icdId := flex.EscapeUrlParm(*instance.ID) icdClient, err := meta.(conns.ClientSession).ICDAPI() if err != nil { - return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database client settings: %s", err)) } if pw, ok := d.GetOk("adminpassword"); ok { @@ -1139,9 +1363,9 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) cdb, err := icdClient.Cdbs().GetCdb(icdId) if err != nil { if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { - return fmt.Errorf("[ERROR] The database instance was not found in the region set for the Provider, or the default of us-south. Specify the correct region in the provider definition, or create a provider alias for the correct region. %v", err) + return diag.FromErr(fmt.Errorf("[ERROR] The database instance was not found in the region set for the Provider, or the default of us-south. Specify the correct region in the provider definition, or create a provider alias for the correct region. 
%v", err)) } - return fmt.Errorf("[ERROR] Error getting database config while updating adminpassword for: %s with error %s", icdId, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database config while updating adminpassword for: %s with error %s", icdId, err)) } userParams := icdv4.UserReq{ @@ -1151,12 +1375,12 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Users().UpdateUser(icdId, cdb.AdminUser, userParams) if err != nil { - return fmt.Errorf("[ERROR] Error updating database admin password: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database admin password: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for update of database (%s) admin password task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for update of database (%s) admin password task to complete: %s", icdId, err)) } } @@ -1171,12 +1395,12 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Whitelists().CreateWhitelist(icdId, whitelistReq) if err != nil { - return fmt.Errorf("[ERROR] Error updating database whitelist entry: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database whitelist entry: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for update of database (%s) whitelist task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for update of database (%s) whitelist task to complete: %s", icdId, err)) } } } @@ -1184,17 +1408,17 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} cpuBody, err := expandICDAutoScalingGroup(d, cpuRecord, "cpu") if err != 
nil { - return fmt.Errorf("[ERROR] Error in getting cpuBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting cpuBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.CPU = &cpuBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database cpu auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database cpu auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err)) } } @@ -1202,17 +1426,17 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} diskBody, err := expandICDAutoScalingGroup(d, diskRecord, "disk") if err != nil { - return fmt.Errorf("[ERROR] Error in getting diskBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting diskBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.Disk = &diskBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database disk auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database disk auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) disk auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) disk auto_scaling group 
update task to complete: %s", icdId, err)) } } @@ -1220,19 +1444,18 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} memoryBody, err := expandICDAutoScalingGroup(d, memoryRecord, "memory") if err != nil { - return fmt.Errorf("[ERROR] Error in getting memoryBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting memoryBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.Memory = &memoryBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database memory auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database memory auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err)) } - } if userlist, ok := d.GetOk("users"); ok { @@ -1246,23 +1469,23 @@ func resourceIBMDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Users().CreateUser(icdId, userReq) if err != nil { - return fmt.Errorf("[ERROR] Error updating database user (%s) entry: %s", user.UserName, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database user (%s) entry: %s", user.UserName, err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for update of database (%s) user (%s) create task to complete: %s", icdId, user.UserName, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for update of database (%s) user (%s) create task to 
complete: %s", icdId, user.UserName, err)) } } } - return resourceIBMDatabaseInstanceRead(d, meta) + return resourceIBMDatabaseInstanceRead(context, d, meta) } -func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { +func resourceIBMDatabaseInstanceRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { rsConClient, err := meta.(conns.ClientSession).ResourceControllerV2API() if err != nil { - return err + return diag.FromErr(err) } instanceID := d.Id() @@ -1278,7 +1501,7 @@ func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e d.SetId("") return nil } - return fmt.Errorf("[ERROR] Error retrieving resource instance: %s %s", err, response) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving resource instance: %s %s", err, response)) } if strings.Contains(*instance.State, "removed") { log.Printf("[WARN] Removing instance from TF state because it's now in removed state") @@ -1320,48 +1543,48 @@ func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e rcontroller, err := flex.GetBaseController(meta) if err != nil { - return err + return diag.FromErr(err) } d.Set(flex.ResourceControllerURL, rcontroller+"/services/"+url.QueryEscape(*instance.CRN)) rsCatClient, err := meta.(conns.ClientSession).ResourceCatalogAPI() if err != nil { - return err + return diag.FromErr(err) } rsCatRepo := rsCatClient.ResourceCatalog() serviceOff, err := rsCatRepo.GetServiceName(*instance.ResourceID) if err != nil { - return fmt.Errorf("[ERROR] Error retrieving service offering: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving service offering: %s", err)) } d.Set("service", serviceOff) servicePlan, err := rsCatRepo.GetServicePlanName(*instance.ResourcePlanID) if err != nil { - return fmt.Errorf("[ERROR] Error retrieving plan: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error retrieving plan: %s", err)) } d.Set("plan", servicePlan) icdClient, 
err := meta.(conns.ClientSession).ICDAPI() if err != nil { - return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database client settings: %s", err)) } icdId := flex.EscapeUrlParm(instanceID) cdb, err := icdClient.Cdbs().GetCdb(icdId) if err != nil { if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { - return fmt.Errorf("[ERROR] The database instance was not found in the region set for the Provider. Specify the correct region in the provider definition. %v", err) + return diag.FromErr(fmt.Errorf("[ERROR] The database instance was not found in the region set for the Provider. Specify the correct region in the provider definition. %v", err)) } - return fmt.Errorf("[ERROR] Error getting database config for: %s with error %s", icdId, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database config for: %s with error %s", icdId, err)) } d.Set("adminuser", cdb.AdminUser) d.Set("version", cdb.Version) groupList, err := icdClient.Groups().GetGroups(icdId) if err != nil { - return fmt.Errorf("[ERROR] Error getting database groups: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database groups: %s", err)) } d.Set("groups", flex.FlattenIcdGroups(groupList)) d.Set("node_count", groupList.Groups[0].Members.AllocationCount) @@ -1377,13 +1600,13 @@ func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e autoSclaingGroup, err := icdClient.AutoScaling().GetAutoScaling(icdId, "member") if err != nil { - return fmt.Errorf("[ERROR] Error getting database autoscaling groups: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database autoscaling groups: %s", err)) } d.Set("auto_scaling", flattenICDAutoScalingGroup(autoSclaingGroup)) whitelist, err := icdClient.Whitelists().GetWhitelist(icdId) if err != nil { - return fmt.Errorf("[ERROR] Error getting database whitelist: %s", err) + return 
diag.FromErr(fmt.Errorf("[ERROR] Error getting database whitelist: %s", err)) } d.Set("whitelist", flex.FlattenWhitelist(whitelist)) @@ -1399,7 +1622,7 @@ func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e userName := user.UserName csEntry, err := getConnectionString(d, userName, connectionEndpoint, meta) if err != nil { - return fmt.Errorf("[ERROR] Error getting user connection string for user (%s): %s", userName, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting user connection string for user (%s): %s", userName, err)) } connectionStrings = append(connectionStrings, csEntry) } @@ -1408,24 +1631,24 @@ func resourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e if serviceOff == "databases-for-postgresql" || serviceOff == "databases-for-redis" || serviceOff == "databases-for-enterprisedb" { configSchema, err := icdClient.Configurations().GetConfiguration(icdId) if err != nil { - return fmt.Errorf("[ERROR] Error getting database (%s) configuration schema : %s", icdId, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database (%s) configuration schema : %s", icdId, err)) } s, err := json.Marshal(configSchema) if err != nil { - return fmt.Errorf("[ERROR] Error marshalling the database configuration schema: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error marshalling the database configuration schema: %s", err)) } if err = d.Set("configuration_schema", string(s)); err != nil { - return fmt.Errorf("[ERROR] Error setting the database configuration schema: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error setting the database configuration schema: %s", err)) } } return nil } -func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceIBMDatabaseInstanceUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { rsConClient, err := meta.(conns.ClientSession).ResourceControllerV2API() if err != nil { - return 
err + return diag.FromErr(err) } instanceID := d.Id() @@ -1451,19 +1674,17 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) if update { _, response, err := rsConClient.UpdateResourceInstance(&updateReq) if err != nil { - return fmt.Errorf("[ERROR] Error updating resource instance: %s %s", err, response) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating resource instance: %s %s", err, response)) } _, err = waitForDatabaseInstanceUpdate(d, meta) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for update of resource instance (%s) to complete: %s", d.Id(), err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for update of resource instance (%s) to complete: %s", d.Id(), err)) } - } if d.HasChange("tags") { - oldList, newList := d.GetChange("tags") err = flex.UpdateTagsUsingCRN(oldList, newList, meta, instanceID) if err != nil { @@ -1474,14 +1695,14 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) icdClient, err := meta.(conns.ClientSession).ICDAPI() if err != nil { - return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting database client settings: %s", err)) } icdId := flex.EscapeUrlParm(instanceID) if d.HasChange("node_count") { err = horizontalScale(d, meta, icdClient) if err != nil { - return err + return diag.FromErr(err) } } if d.HasChange("configuration") { @@ -1493,17 +1714,17 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) configPayload := icdv4.ConfigurationReq{Configuration: configuration} task, err := icdClient.Configurations().UpdateConfiguration(icdId, configPayload) if err != nil { - return fmt.Errorf("[ERROR] Error updating database (%s) configuration: %s", icdId, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database (%s) configuration: %s", icdId, err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, 
d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) configuration update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) configuration update task to complete: %s", icdId, err)) } } } else { - return fmt.Errorf("[ERROR] given database type %s is not configurable", service) + return diag.FromErr(fmt.Errorf("[ERROR] given database type %s is not configurable", service)) } } @@ -1546,13 +1767,94 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) task, err := icdClient.Groups().UpdateGroup(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) scaling group update task to complete: %s", icdId, err)) + } + } + + cloudDatabasesClient, err := meta.(conns.ClientSession).CloudDatabasesV5() + if err != nil { + return diag.FromErr(err) + } + + if d.HasChange("group") { + oldGroup, newGroup := d.GetChange("group") + if oldGroup == nil { + oldGroup = new(schema.Set) + } + if newGroup == nil { + newGroup = new(schema.Set) + } + + os := oldGroup.(*schema.Set) + ns := newGroup.(*schema.Set) + + groupChanges := expandGroups(ns.Difference(os).List()) + + groupsResponse, err := getGroups(instanceID, meta) + if err != nil { + return diag.FromErr(fmt.Errorf( + "[ERROR] Error geting group (%s) scaling group update task to complete: %s", icdId, err)) + } + + currentGroups := normalizeGroups(groupsResponse) + + for _, group := range groupChanges { + groupScaling := 
&clouddatabasesv5.GroupScaling{} + var currentGroup *Group + for _, g := range currentGroups { + if g.ID == group.ID { + currentGroup = &g + break + } + } + + if currentGroup == nil { + return diag.FromErr(fmt.Errorf( + "[ERROR] (%s) group does not exist: %s", icdId, err)) + } + nodeCount := currentGroup.Members.Allocation + + if group.Members != nil && group.Members.Allocation != currentGroup.Members.Allocation { + groupScaling.Members = &clouddatabasesv5.GroupScalingMembers{AllocationCount: core.Int64Ptr(int64(group.Members.Allocation))} + nodeCount = group.Members.Allocation + } + if group.Memory != nil && group.Memory.Allocation != currentGroup.Memory.Allocation { + groupScaling.Memory = &clouddatabasesv5.GroupScalingMemory{AllocationMb: core.Int64Ptr(int64(group.Memory.Allocation * nodeCount))} + } + if group.Disk != nil && group.Disk.Allocation != currentGroup.Disk.Allocation { + groupScaling.Disk = &clouddatabasesv5.GroupScalingDisk{AllocationMb: core.Int64Ptr(int64(group.Disk.Allocation * nodeCount))} + } + if group.CPU != nil && group.CPU.Allocation != currentGroup.CPU.Allocation { + groupScaling.CPU = &clouddatabasesv5.GroupScalingCPU{AllocationCount: core.Int64Ptr(int64(group.CPU.Allocation * nodeCount))} + } + + if groupScaling.Members != nil || groupScaling.Memory != nil || groupScaling.Disk != nil || groupScaling.CPU != nil { + setDeploymentScalingGroupOptions := &clouddatabasesv5.SetDeploymentScalingGroupOptions{ + ID: &instanceID, + GroupID: &group.ID, + Group: groupScaling, + } + + setDeploymentScalingGroupResponse, response, err := cloudDatabasesClient.SetDeploymentScalingGroup(setDeploymentScalingGroupOptions) + + if response.StatusCode > 300 { + return diag.FromErr(err) + } + + taskIDLink := *setDeploymentScalingGroupResponse.Task.ID + + _, err = waitForDatabaseTaskComplete(taskIDLink, d, meta, d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return diag.FromErr(err) + } + } } } @@ -1561,17 +1863,17 @@ func 
resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} cpuBody, err := expandICDAutoScalingGroup(d, cpuRecord, "cpu") if err != nil { - return fmt.Errorf("[ERROR] Error in getting cpuBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting cpuBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.CPU = &cpuBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database cpu auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database cpu auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) cpu auto_scaling group update task to complete: %s", icdId, err)) } } @@ -1580,17 +1882,17 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} diskBody, err := expandICDAutoScalingGroup(d, diskRecord, "disk") if err != nil { - return fmt.Errorf("[ERROR] Error in getting diskBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting diskBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.Disk = &diskBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database disk auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database disk auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting 
for database (%s) disk auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) disk auto_scaling group update task to complete: %s", icdId, err)) } } @@ -1599,17 +1901,17 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) params := icdv4.AutoscalingSetGroup{} memoryBody, err := expandICDAutoScalingGroup(d, memoryRecord, "memory") if err != nil { - return fmt.Errorf("[ERROR] Error in getting memoryBody from expandICDAutoScalingGroup %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error in getting memoryBody from expandICDAutoScalingGroup %s", err)) } params.Autoscaling.Memory = &memoryBody task, err := icdClient.AutoScaling().SetAutoScaling(icdId, "member", params) if err != nil { - return fmt.Errorf("[ERROR] Error updating database memory auto_scaling group: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database memory auto_scaling group: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) memory auto_scaling group update task to complete: %s", icdId, err)) } } @@ -1624,12 +1926,12 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Users().UpdateUser(icdId, adminUser, userParams) if err != nil { - return fmt.Errorf("[ERROR] Error updating database admin password: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database admin password: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) admin password update task to complete: %s", icdId, err) + return 
diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) admin password update task to complete: %s", icdId, err)) } } @@ -1658,12 +1960,12 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Whitelists().CreateWhitelist(icdId, whitelistReq) if err != nil { - return fmt.Errorf("[ERROR] Error updating database whitelist entry %v : %s", wlEntry.Address, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database whitelist entry %v : %s", wlEntry.Address, err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) whitelist create task to complete for entry %s : %s", icdId, wlEntry.Address, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) whitelist create task to complete for entry %s : %s", icdId, wlEntry.Address, err)) } } @@ -1680,12 +1982,12 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) ipAddress := wlEntry.Address task, err := icdClient.Whitelists().DeleteWhitelist(icdId, ipAddress) if err != nil { - return fmt.Errorf("[ERROR] Error deleting database whitelist entry: %s", err) + return diag.FromErr(fmt.Errorf("[ERROR] Error deleting database whitelist entry: %s", err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) whitelist delete task to complete for ipAddress %s : %s", icdId, ipAddress, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) whitelist delete task to complete for ipAddress %s : %s", icdId, ipAddress, err)) } } @@ -1726,18 +2028,18 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } task, err := icdClient.Users().UpdateUser(icdId, newEntry["name"].(string), userParams) if err != nil { - return 
fmt.Errorf("[ERROR] Error updating database user (%s) password: %s", newEntry["name"].(string), err) + return diag.FromErr(fmt.Errorf("[ERROR] Error updating database user (%s) password: %s", newEntry["name"].(string), err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) user (%s) password update task to complete: %s", icdId, newEntry["name"].(string), err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) user (%s) password update task to complete: %s", icdId, newEntry["name"].(string), err)) } } else { _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) user (%s) create task to complete: %s", icdId, newEntry["name"].(string), err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) user (%s) create task to complete: %s", icdId, newEntry["name"].(string), err)) } } } @@ -1754,18 +2056,18 @@ func resourceIBMDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) user := userEntry.UserName task, err := icdClient.Users().DeleteUser(icdId, user) if err != nil { - return fmt.Errorf("[ERROR] Error deleting database user (%s) entry: %s", user, err) + return diag.FromErr(fmt.Errorf("[ERROR] Error deleting database user (%s) entry: %s", user, err)) } _, err = waitForDatabaseTaskComplete(task.Id, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for database (%s) user (%s) delete task to complete: %s", icdId, user, err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for database (%s) user (%s) delete task to complete: %s", icdId, user, err)) } } } } - return resourceIBMDatabaseInstanceRead(d, meta) + return resourceIBMDatabaseInstanceRead(context, d, meta) } func horizontalScale(d *schema.ResourceData, meta 
interface{}, icdClient icdv4.ICDServiceAPI) error { @@ -1872,10 +2174,10 @@ func getConnectionString(d *schema.ResourceData, userName, connectionEndpoint st return csEntry, nil } -func resourceIBMDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { +func resourceIBMDatabaseInstanceDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { rsConClient, err := meta.(conns.ClientSession).ResourceControllerV2API() if err != nil { - return err + return diag.FromErr(err) } id := d.Id() recursive := true @@ -1892,14 +2194,14 @@ func resourceIBMDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) log.Printf("[WARN] Resource instance already deleted %s\n ", err) err = nil } else { - return fmt.Errorf("[ERROR] Error deleting resource instance: %s %s ", err, response) + return diag.FromErr(fmt.Errorf("[ERROR] Error deleting resource instance: %s %s ", err, response)) } } _, err = waitForDatabaseInstanceDelete(d, meta) if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for resource instance (%s) to be deleted: %s", d.Id(), err) + return diag.FromErr(fmt.Errorf( + "[ERROR] Error waiting for resource instance (%s) to be deleted: %s", d.Id(), err)) } d.SetId("") @@ -2237,3 +2539,198 @@ func flattenICDAutoScalingGroup(autoScalingGroup icdv4.AutoscalingGetGroup) []ma result = append(result, as) return result } + +func normalizeGroups(_groups []clouddatabasesv5.Group) (groups []Group) { + groups = make([]Group, len(_groups)) + for _, g := range _groups { + group := Group{ID: *g.ID} + + group.Members = &GroupResource{ + Units: *g.Members.Units, + Allocation: int(*g.Members.AllocationCount), + Minimum: int(*g.Members.MinimumCount), + Maximum: int(*g.Members.MaximumCount), + StepSize: int(*g.Members.StepSizeCount), + IsAdjustable: *g.Members.IsAdjustable, + CanScaleDown: *g.Members.CanScaleDown, + } + + group.Memory = &GroupResource{ + Units: *g.Memory.Units, + Allocation: int(*g.Memory.AllocationMb), + 
Minimum: int(*g.Memory.MinimumMb), + Maximum: int(*g.Memory.MaximumMb), + StepSize: int(*g.Memory.StepSizeMb), + IsAdjustable: *g.Memory.IsAdjustable, + CanScaleDown: *g.Memory.CanScaleDown, + } + + group.Disk = &GroupResource{ + Units: *g.Disk.Units, + Allocation: int(*g.Disk.AllocationMb), + Minimum: int(*g.Disk.MinimumMb), + Maximum: int(*g.Disk.MaximumMb), + StepSize: int(*g.Disk.StepSizeMb), + IsAdjustable: *g.Disk.IsAdjustable, + CanScaleDown: *g.Disk.CanScaleDown, + } + + group.CPU = &GroupResource{ + Units: *g.CPU.Units, + Allocation: int(*g.CPU.AllocationCount), + Minimum: int(*g.CPU.MinimumCount), + Maximum: int(*g.CPU.MaximumCount), + StepSize: int(*g.CPU.StepSizeCount), + IsAdjustable: *g.CPU.IsAdjustable, + CanScaleDown: *g.CPU.CanScaleDown, + } + + groups = append(groups, group) + } + + return groups +} + +func expandGroups(_groups []interface{}) []*Group { + if len(_groups) == 0 { + return nil + } + + groups := make([]*Group, 0, len(_groups)) + + for _, groupRaw := range _groups { + if tfGroup, ok := groupRaw.(map[string]interface{}); ok { + group := Group{ID: tfGroup["group_id"].(string)} + + if membersSet, ok := tfGroup["members"].(*schema.Set); ok { + members := membersSet.List() + if len(members) != 0 { + group.Members = &GroupResource{Allocation: members[0].(map[string]interface{})["allocation_count"].(int)} + } + } + + if memorySet, ok := tfGroup["memory"].(*schema.Set); ok { + memory := memorySet.List() + if len(memory) != 0 { + group.Memory = &GroupResource{Allocation: memory[0].(map[string]interface{})["allocation_mb"].(int)} + } + } + + if diskSet, ok := tfGroup["disk"].(*schema.Set); ok { + disk := diskSet.List() + if len(disk) != 0 { + group.Disk = &GroupResource{Allocation: disk[0].(map[string]interface{})["allocation_mb"].(int)} + } + } + + if cpuSet, ok := tfGroup["cpu"].(*schema.Set); ok { + cpu := cpuSet.List() + if len(cpu) != 0 { + group.CPU = &GroupResource{Allocation: cpu[0].(map[string]interface{})["allocation_count"].(int)} + } 
+ } + + groups = append(groups, &group) + } + } + + // analytics must be created before bi_connector + sortPriority := map[string]int{ + "members": 10, + "analytics": 2, + "bi_connector": 1, + } + + sort.SliceStable(groups, func(i, j int) bool { + return sortPriority[groups[i].ID] > sortPriority[groups[j].ID] + }) + + return groups +} + +func checkV5Groups(_ context.Context, diff *schema.ResourceDiff, meta interface{}) (err error) { + instanceID := diff.Id() + service := diff.Get("service").(string) + + if group, ok := diff.GetOk("group"); ok { + var currentGroups []Group + var groupList []clouddatabasesv5.Group + var groupIds []string + + if instanceID != "" { + groupList, err = getGroups(instanceID, meta) + } else { + groupList, err = getDefaultScalingGroups(service, meta) + } + + if err != nil { + return err + } + + currentGroups = normalizeGroups(groupList) + + tfGroups := expandGroups(group.(*schema.Set).List()) + + // Check group_ids are unique + groupIds = make([]string, 0, len(tfGroups)) + for _, g := range tfGroups { + groupIds = append(groupIds, g.ID) + } + + for n1, i1 := range groupIds { + for n2, i2 := range groupIds { + if i1 == i2 && n1 != n2 { + return fmt.Errorf("found 2 or more instances of group with group_id %v", i1) + } + } + } + + // Get default or current group scaling values + for _, group := range tfGroups { + if group == nil { + break + } + groupId := group.ID + var groupDefaults *Group + for _, g := range currentGroups { + if g.ID == groupId { + groupDefaults = &g + break + } + } + + // set current nodeCount + nodeCount := groupDefaults.Members.Allocation + + if group.Members != nil { + err = checkGroupScaling(groupId, "members", group.Members.Allocation, groupDefaults.Members, 1) + if err != nil { + return err + } + } + + if group.Memory != nil { + err = checkGroupScaling(groupId, "memory", group.Memory.Allocation, groupDefaults.Memory, nodeCount) + if err != nil { + return err + } + } + + if group.Disk != nil { + err = 
checkGroupScaling(groupId, "disk", group.Disk.Allocation, groupDefaults.Disk, nodeCount) + if err != nil { + return err + } + } + + if group.CPU != nil { + err = checkGroupScaling(groupId, "cpu", group.CPU.Allocation, groupDefaults.CPU, nodeCount) + if err != nil { + return err + } + } + } + } + + return nil +} diff --git a/ibm/service/database/resource_ibm_database_cassandra_test.go b/ibm/service/database/resource_ibm_database_cassandra_test.go index 5680cd6d5a..29e6695e5f 100644 --- a/ibm/service/database/resource_ibm_database_cassandra_test.go +++ b/ibm/service/database/resource_ibm_database_cassandra_test.go @@ -17,7 +17,7 @@ func TestAccIBMCassandraDatabaseInstanceBasic(t *testing.T) { t.Parallel() databaseResourceGroup := "default" var databaseInstanceOne string - rnd := fmt.Sprintf("tf-Es-%d", acctest.RandIntRange(10, 100)) + rnd := fmt.Sprintf("tf-Datastax-%d", acctest.RandIntRange(10, 100)) testName := rnd name := "ibm_database." + testName @@ -83,7 +83,7 @@ func TestAccIBMDatabaseInstance_Cassandra_Node(t *testing.T) { t.Parallel() databaseResourceGroup := "default" var databaseInstanceOne string - rnd := fmt.Sprintf("tf-Es-%d", acctest.RandIntRange(10, 100)) + rnd := fmt.Sprintf("tf-Datastax-%d", acctest.RandIntRange(10, 100)) testName := rnd name := "ibm_database." + testName @@ -175,13 +175,120 @@ func TestAccIBMDatabaseInstance_Cassandra_Node(t *testing.T) { }) } +func TestAccIBMDatabaseInstance_Cassandra_Group(t *testing.T) { + t.Parallel() + databaseResourceGroup := "default" + var databaseInstanceOne string + rnd := fmt.Sprintf("tf-Datastax-%d", acctest.RandIntRange(10, 100)) + testName := rnd + name := "ibm_database." 
+ testName + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMDatabaseInstanceCassandraGroupBasic(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-cassandra"), + resource.TestCheckResourceAttr(name, "plan", "enterprise"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "adminuser", "admin"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "12288"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "20480"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "6"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "36864"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "18"), + resource.TestCheckResourceAttr(name, "whitelist.#", "1"), + resource.TestCheckResourceAttr(name, "users.#", "1"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.1.name", "admin"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.hosts.#", "1"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.database", ""), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceCassandraGroupFullyspecified(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, 
&databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-cassandra"), + resource.TestCheckResourceAttr(name, "plan", "enterprise"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "12416"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "20480"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "6"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "37248"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "18"), + resource.TestCheckResourceAttr(name, "whitelist.#", "2"), + resource.TestCheckResourceAttr(name, "users.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), + resource.TestCheckResourceAttr(name, "connectionstrings.2.name", "admin"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceCassandraGroupReduced(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-cassandra"), + resource.TestCheckResourceAttr(name, "plan", "enterprise"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "12288"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "20480"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "6"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + 
resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "36864"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "18"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceCassandraGroupScaleOut(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-cassandra"), + resource.TestCheckResourceAttr(name, "plan", "enterprise"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "4"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "12288"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "20480"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "6"), + resource.TestCheckResourceAttr(name, "groups.0.count", "4"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "81920"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "24"), + resource.TestCheckResourceAttr(name, "groups.1.count", "3"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + ), + }, + }, + }) +} + // TestAccIBMDatabaseInstance_CreateAfterManualDestroy not required as tested by resource_instance tests func TestAccIBMDatabaseInstanceCassandraImport(t *testing.T) { t.Parallel() databaseResourceGroup := 
"default" var databaseInstanceOne string - serviceName := fmt.Sprintf("tf-Es-%d", acctest.RandIntRange(10, 100)) + serviceName := fmt.Sprintf("tf-Datastax-%d", acctest.RandIntRange(10, 100)) //serviceName := "test_acc" resourceName := "ibm_database." + serviceName @@ -219,7 +326,7 @@ func testAccCheckIBMDatabaseInstanceCassandraBasic(databaseResourceGroup string, is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -253,7 +360,7 @@ func testAccCheckIBMDatabaseInstanceCassandraFullyspecified(databaseResourceGrou is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -286,7 +393,7 @@ func testAccCheckIBMDatabaseInstanceCassandraFullyspecified(databaseResourceGrou delete = "15m" } } - + `, databaseResourceGroup, name) } @@ -296,7 +403,7 @@ func testAccCheckIBMDatabaseInstanceCassandraReduced(databaseResourceGroup strin is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -322,7 +429,7 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeBasic(databaseResourceGroup str is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -330,10 +437,10 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeBasic(databaseResourceGroup str plan = "enterprise" location = "us-south" adminpassword = "password12" - node_count = 3 + node_count = 3 node_memory_allocation_mb = 12288 node_disk_allocation_mb = 20480 - node_cpu_allocation_count = 6 + node_cpu_allocation_count = 6 users { name = "user123" @@ -359,34 +466,35 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeFullyspecified(databaseResource is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = 
data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-cassandra" plan = "enterprise" + version = "5.1" location = "us-south" adminpassword = "password12" - node_count = 3 + node_count = 3 node_memory_allocation_mb = 12416 node_disk_allocation_mb = 20480 - node_cpu_allocation_count = 6 + node_cpu_allocation_count = 6 users { - name = "user123" - password = "password12" + name = "user123" + password = "password12" } users { - name = "user124" - password = "password12" + name = "user124" + password = "password12" } whitelist { - address = "172.168.1.2/32" - description = "desc1" + address = "172.168.1.2/32" + description = "desc1" } whitelist { - address = "172.168.1.1/32" - description = "desc" + address = "172.168.1.1/32" + description = "desc" } timeouts { @@ -395,7 +503,7 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeFullyspecified(databaseResource delete = "15m" } } - + `, databaseResourceGroup, name) } @@ -405,18 +513,19 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeReduced(databaseResourceGroup s is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-cassandra" plan = "enterprise" + version = "5.1" location = "us-south" adminpassword = "password12" - node_count = 3 + node_count = 3 node_memory_allocation_mb = 12288 node_disk_allocation_mb = 20480 - node_cpu_allocation_count = 6 + node_cpu_allocation_count = 6 timeouts { create = "480m" @@ -433,7 +542,7 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeScaleOut(databaseResourceGroup is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -441,10 +550,205 @@ func testAccCheckIBMDatabaseInstanceCassandraNodeScaleOut(databaseResourceGroup plan = "enterprise" location = "us-south" adminpassword = "password12" - node_count = 4 + node_count = 4 node_memory_allocation_mb = 12288 
node_disk_allocation_mb = 20480 - node_cpu_allocation_count = 6 + node_cpu_allocation_count = 6 + + timeouts { + create = "480m" + update = "480m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceCassandraGroupBasic(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-cassandra" + plan = "enterprise" + version = "5.1" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 12288 + } + disk { + allocation_mb = 20480 + } + cpu { + allocation_count = 6 + } + } + users { + name = "user123" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + + timeouts { + create = "480m" + update = "480m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceCassandraGroupFullyspecified(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-cassandra" + plan = "enterprise" + version = "5.1" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 12416 + } + disk { + allocation_mb = 20480 + } + cpu { + allocation_count = 6 + } + } + users { + name = "user123" + password = "password12" + } + users { + name = "user124" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + whitelist { + address = "172.168.1.1/32" + 
description = "desc" + } + + timeouts { + create = "480m" + update = "480m" + delete = "15m" + } + } + + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceCassandraGroupReduced(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-cassandra" + plan = "enterprise" + version = "5.1" + location = "us-south" + adminpassword = "password12" + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 12288 + } + disk { + allocation_mb = 20480 + } + cpu { + allocation_count = 6 + } + } + + timeouts { + create = "480m" + update = "480m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceCassandraGroupScaleOut(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-cassandra" + plan = "enterprise" + version = "5.1" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 4 + } + memory { + allocation_mb = 12288 + } + disk { + allocation_mb = 20480 + } + cpu { + allocation_count = 6 + } + } + + group { + group_id = "search" + members { + allocation_count = 3 + } + } timeouts { create = "480m" @@ -461,7 +765,7 @@ func testAccCheckIBMDatabaseInstanceCassandraImport(databaseResourceGroup string is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -477,7 +781,5 @@ func 
testAccCheckIBMDatabaseInstanceCassandraImport(databaseResourceGroup string } - - `, databaseResourceGroup, name) } diff --git a/ibm/service/database/resource_ibm_database_elasticsearch_test.go b/ibm/service/database/resource_ibm_database_elasticsearch_test.go index f85d0b0345..cad003113a 100644 --- a/ibm/service/database/resource_ibm_database_elasticsearch_test.go +++ b/ibm/service/database/resource_ibm_database_elasticsearch_test.go @@ -181,6 +181,114 @@ func TestAccIBMDatabaseInstance_Elasticsearch_Node(t *testing.T) { }) } +func TestAccIBMDatabaseInstance_Elasticsearch_Group(t *testing.T) { + t.Parallel() + databaseResourceGroup := "default" + var databaseInstanceOne string + rnd := fmt.Sprintf("tf-Es-%d", acctest.RandIntRange(10, 100)) + testName := rnd + name := "ibm_database." + testName + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMDatabaseInstanceElasticsearchGroupBasic(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "adminuser", "admin"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "5120"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), + + resource.TestCheckResourceAttr(name, "whitelist.#", "1"), + resource.TestCheckResourceAttr(name, "users.#", "1"), + 
resource.TestCheckResourceAttr(name, "connectionstrings.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.1.name", "admin"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.hosts.#", "1"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.database", ""), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceElasticsearchGroupFullyspecified(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), + resource.TestCheckResourceAttr(name, "whitelist.#", "2"), + resource.TestCheckResourceAttr(name, "users.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), + resource.TestCheckResourceAttr(name, "connectionstrings.2.name", "admin"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceElasticsearchGroupReduced(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", 
"databases-for-elasticsearch"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "3"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstanceElasticsearchGroupScaleOut(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "node_count", "4"), + resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), + resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), + resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.count", "4"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "4096"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "24576"), + resource.TestCheckResourceAttr(name, 
"groups.0.cpu.0.allocation_count", "12"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + ), + }, + //{ + // ResourceName: name, + // ImportState: true, + // ImportStateVerify: true, + //}, + }, + }) +} + // TestAccIBMDatabaseInstance_CreateAfterManualDestroy not required as tested by resource_instance tests func TestAccIBMDatabaseInstanceElasticsearchImport(t *testing.T) { @@ -225,7 +333,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchBasic(databaseResourceGroup str is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -259,7 +367,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchFullyspecified(databaseResource is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -292,7 +400,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchFullyspecified(databaseResource delete = "15m" } } - + `, databaseResourceGroup, name) } @@ -302,7 +410,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchReduced(databaseResourceGroup s is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -328,7 +436,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeBasic(databaseResourceGroup is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -365,7 +473,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeFullyspecified(databaseReso is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -400,7 +508,7 @@ func 
testAccCheckIBMDatabaseInstanceElasticsearchNodeFullyspecified(databaseReso delete = "15m" } } - + `, databaseResourceGroup, name) } @@ -410,7 +518,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeReduced(databaseResourceGro is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -438,7 +546,7 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeScaleOut(databaseResourceGr is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -460,13 +568,198 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeScaleOut(databaseResourceGr `, databaseResourceGroup, name) } +func testAccCheckIBMDatabaseInstanceElasticsearchGroupBasic(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-elasticsearch" + plan = "standard" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 1024 + } + disk { + allocation_mb = 5120 + } + cpu { + allocation_count = 3 + } + } + + users { + name = "user123" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + + timeouts { + create = "120m" + update = "120m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceElasticsearchGroupFullyspecified(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service 
= "databases-for-elasticsearch" + plan = "standard" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 1024 + } + disk { + allocation_mb = 6144 + } + cpu { + allocation_count = 3 + } + } + users { + name = "user123" + password = "password12" + } + users { + name = "user124" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + whitelist { + address = "172.168.1.1/32" + description = "desc" + } + + timeouts { + create = "120m" + update = "120m" + delete = "15m" + } + } + + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceElasticsearchGroupReduced(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-elasticsearch" + plan = "standard" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 1024 + } + disk { + allocation_mb = 6144 + } + cpu { + allocation_count = 3 + } + } + + timeouts { + create = "120m" + update = "120m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstanceElasticsearchGroupScaleOut(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default = true + # name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-elasticsearch" + plan = "standard" + location = "us-south" + adminpassword = "password12" + + group { + group_id = "member" + members { + allocation_count = 4 + } + memory { + allocation_mb = 1024 + } + disk { + 
allocation_mb = 6144 + } + cpu { + allocation_count = 3 + } + } + timeouts { + create = "120m" + update = "120m" + delete = "15m" + } + } + `, databaseResourceGroup, name) +} + func testAccCheckIBMDatabaseInstanceElasticsearchImport(databaseResourceGroup string, name string) string { return fmt.Sprintf(` data "ibm_resource_group" "test_acc" { is_default = true # name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" diff --git a/ibm/service/database/resource_ibm_database_postgresql_test.go b/ibm/service/database/resource_ibm_database_postgresql_test.go index 867f205872..467fa8bb13 100644 --- a/ibm/service/database/resource_ibm_database_postgresql_test.go +++ b/ibm/service/database/resource_ibm_database_postgresql_test.go @@ -35,7 +35,7 @@ const ( databaseInstanceReclamation = "pending_reclamation" ) -func TestAccIBMDatabaseInstance_Postgres_Basic(t *testing.T) { +func TestAccIBMDatabaseInstancePostgresBasic(t *testing.T) { t.Parallel() databaseResourceGroup := "default" var databaseInstanceOne string @@ -118,7 +118,7 @@ func TestAccIBMDatabaseInstance_Postgres_Basic(t *testing.T) { }) } -func TestAccIBMDatabaseInstance_Postgres_Node(t *testing.T) { +func TestAccIBMDatabaseInstancePostgresNode(t *testing.T) { t.Parallel() databaseResourceGroup := "default" var databaseInstanceOne string @@ -224,6 +224,107 @@ func TestAccIBMDatabaseInstance_Postgres_Node(t *testing.T) { }) } +func TestAccIBMDatabaseInstancePostgresGroup(t *testing.T) { + t.Parallel() + databaseResourceGroup := "default" + var databaseInstanceOne string + rnd := fmt.Sprintf("tf-Pgress-%d", acctest.RandIntRange(10, 100)) + testName := rnd + name := "ibm_database." 
+ testName + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMDatabaseInstancePostgresGroupBasic(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-postgresql"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "adminuser", "admin"), + resource.TestCheckResourceAttr(name, "groups.0.count", "2"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2048"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "10240"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), + resource.TestCheckResourceAttr(name, "service_endpoints", "public"), + resource.TestCheckResourceAttr(name, "whitelist.#", "1"), + resource.TestCheckResourceAttr(name, "users.#", "1"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.1.name", "admin"), + resource.TestMatchResourceAttr(name, "connectionstrings.1.certname", regexp.MustCompile("[-a-z0-9]*")), + resource.TestMatchResourceAttr(name, "connectionstrings.1.certbase64", regexp.MustCompile("^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$")), + resource.TestCheckResourceAttr(name, "tags.#", "1"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstancePostgresGroupFullyspecified(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, 
"name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-postgresql"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "groups.0.count", "2"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2304"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "14336"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), + resource.TestCheckResourceAttr(name, "service_endpoints", "public-and-private"), + resource.TestCheckResourceAttr(name, "whitelist.#", "2"), + resource.TestCheckResourceAttr(name, "users.#", "2"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), + resource.TestCheckResourceAttr(name, "connectionstrings.2.name", "admin"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.hosts.#", "1"), + resource.TestCheckResourceAttr(name, "connectionstrings.0.scheme", "postgres"), + resource.TestMatchResourceAttr(name, "connectionstrings.0.certname", regexp.MustCompile("[-a-z0-9]*")), + resource.TestMatchResourceAttr(name, "connectionstrings.0.certbase64", regexp.MustCompile("^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$")), + resource.TestMatchResourceAttr(name, "connectionstrings.0.database", regexp.MustCompile("[-a-z0-9]+")), + resource.TestCheckResourceAttr(name, "tags.#", "1"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstancePostgresGroupReduced(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-postgresql"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, 
"groups.0.count", "2"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2048"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "14336"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + resource.TestCheckResourceAttr(name, "tags.#", "1"), + ), + }, + { + Config: testAccCheckIBMDatabaseInstancePostgresGroupScaleOut(databaseResourceGroup, testName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMDatabaseInstanceExists(name, &databaseInstanceOne), + resource.TestCheckResourceAttr(name, "name", testName), + resource.TestCheckResourceAttr(name, "service", "databases-for-postgresql"), + resource.TestCheckResourceAttr(name, "plan", "standard"), + resource.TestCheckResourceAttr(name, "location", "us-south"), + resource.TestCheckResourceAttr(name, "groups.0.count", "3"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "21504"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), + resource.TestCheckResourceAttr(name, "whitelist.#", "0"), + resource.TestCheckResourceAttr(name, "users.#", "0"), + resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), + resource.TestCheckResourceAttr(name, "tags.#", "1"), + ), + }, + }, + }) +} + // TestAccIBMDatabaseInstance_CreateAfterManualDestroy not required as tested by resource_instance tests func TestAccIBMDatabaseInstancePostgresImport(t *testing.T) { @@ -388,7 +489,7 @@ func testAccCheckIBMDatabaseInstancePostgresBasic(databaseResourceGroup string, data "ibm_resource_group" "test_acc" { name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = 
"%[2]s" @@ -400,12 +501,12 @@ func testAccCheckIBMDatabaseInstancePostgresBasic(databaseResourceGroup string, members_disk_allocation_mb = 10240 tags = ["one:two"] users { - name = "user123" - password = "password12" + name = "user123" + password = "password12" } whitelist { - address = "172.168.1.2/32" - description = "desc1" + address = "172.168.1.2/32" + description = "desc1" } } `, databaseResourceGroup, name) @@ -416,7 +517,7 @@ func testAccCheckIBMDatabaseInstancePostgresFullyspecified(databaseResourceGroup data "ibm_resource_group" "test_acc" { name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -430,20 +531,20 @@ func testAccCheckIBMDatabaseInstancePostgresFullyspecified(databaseResourceGroup service_endpoints = "public-and-private" tags = ["one:two"] users { - name = "user123" - password = "password12" + name = "user123" + password = "password12" } users { - name = "user124" - password = "password12" + name = "user124" + password = "password12" } whitelist { - address = "172.168.1.2/32" - description = "desc1" + address = "172.168.1.2/32" + description = "desc1" } whitelist { - address = "172.168.1.1/32" - description = "desc" + address = "172.168.1.1/32" + description = "desc" } } `, databaseResourceGroup, name) @@ -453,9 +554,9 @@ func testAccCheckIBMDatabaseInstancePostgresReduced(databaseResourceGroup string return fmt.Sprintf(` data "ibm_resource_group" "test_acc" { name = "%[1]s" - } - - resource "ibm_database" "%[2]s" { + } + + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-postgresql" @@ -466,7 +567,7 @@ func testAccCheckIBMDatabaseInstancePostgresReduced(databaseResourceGroup string members_disk_allocation_mb = 14336 service_endpoints = "public" tags = ["one:two"] - } + } `, databaseResourceGroup, name) } @@ -475,7 +576,7 @@ func 
testAccCheckIBMDatabaseInstancePostgresNodeBasic(databaseResourceGroup stri data "ibm_resource_group" "test_acc" { name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -483,18 +584,18 @@ func testAccCheckIBMDatabaseInstancePostgresNodeBasic(databaseResourceGroup stri plan = "standard" location = "us-south" adminpassword = "password12" - node_count = 2 - node_memory_allocation_mb = 1024 - node_disk_allocation_mb = 7168 - node_cpu_allocation_count = 3 + node_count = 2 + node_memory_allocation_mb = 1024 + node_disk_allocation_mb = 5120 + node_cpu_allocation_count = 3 tags = ["one:two"] users { - name = "user123" - password = "password12" + name = "user123" + password = "password12" } whitelist { - address = "172.168.1.2/32" - description = "desc1" + address = "172.168.1.2/32" + description = "desc1" } } `, databaseResourceGroup, name) @@ -505,7 +606,7 @@ func testAccCheckIBMDatabaseInstancePostgresNodeFullyspecified(databaseResourceG data "ibm_resource_group" "test_acc" { name = "%[1]s" } - + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" @@ -513,27 +614,27 @@ func testAccCheckIBMDatabaseInstancePostgresNodeFullyspecified(databaseResourceG plan = "standard" location = "us-south" adminpassword = "password12" - node_count = 2 - node_memory_allocation_mb = 1024 - node_disk_allocation_mb = 7168 + node_count = 2 + node_memory_allocation_mb = 1024 + node_disk_allocation_mb = 7168 node_cpu_allocation_count = 3 service_endpoints = "public-and-private" tags = ["one:two"] users { - name = "user123" - password = "password12" + name = "user123" + password = "password12" } users { - name = "user124" - password = "password12" + name = "user124" + password = "password12" } whitelist { - address = "172.168.1.2/32" - description = "desc1" + address = "172.168.1.2/32" + description = "desc1" } whitelist { - address = "172.168.1.1/32" - description = 
"desc" + address = "172.168.1.1/32" + description = "desc" } } `, databaseResourceGroup, name) @@ -543,44 +644,203 @@ func testAccCheckIBMDatabaseInstancePostgresNodeReduced(databaseResourceGroup st return fmt.Sprintf(` data "ibm_resource_group" "test_acc" { name = "%[1]s" - } - - resource "ibm_database" "%[2]s" { + } + + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-postgresql" plan = "standard" location = "us-south" adminpassword = "password12" - node_count = 2 - node_memory_allocation_mb = 1024 - node_disk_allocation_mb = 7168 - node_cpu_allocation_count = 3 + node_count = 2 + node_memory_allocation_mb = 1024 + node_disk_allocation_mb = 7168 + node_cpu_allocation_count = 3 service_endpoints = "public" tags = ["one:two"] - } + } `, databaseResourceGroup, name) } func testAccCheckIBMDatabaseInstancePostgresNodeScaleOut(databaseResourceGroup string, name string) string { return fmt.Sprintf(` data "ibm_resource_group" "test_acc" { name = "%[1]s" - } - - resource "ibm_database" "%[2]s" { + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-postgresql" + plan = "standard" + location = "us-south" + adminpassword = "password12" + node_count = 3 + node_memory_allocation_mb = 1024 + node_disk_allocation_mb = 7168 + node_cpu_allocation_count = 3 + service_endpoints = "public" + tags = ["one:two"] + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstancePostgresGroupBasic(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-postgresql" + plan = "standard" + location = "us-south" + adminpassword = "password12" + tags = ["one:two"] + group { + group_id = 
"member" + members { + allocation_count = 2 + } + memory { + allocation_mb = 1024 + } + disk { + allocation_mb = 5120 + } + cpu { + allocation_count = 3 + } + } + users { + name = "user123" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstancePostgresGroupFullyspecified(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-postgresql" + plan = "standard" + location = "us-south" + adminpassword = "password12" + service_endpoints = "public-and-private" + tags = ["one:two"] + group { + group_id = "member" + members { + allocation_count = 2 + } + memory { + allocation_mb = 1152 + } + disk { + allocation_mb = 7168 + } + cpu { + allocation_count = 3 + } + } + users { + name = "user123" + password = "password12" + } + users { + name = "user124" + password = "password12" + } + whitelist { + address = "172.168.1.2/32" + description = "desc1" + } + whitelist { + address = "172.168.1.1/32" + description = "desc" + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstancePostgresGroupReduced(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-postgresql" plan = "standard" location = "us-south" adminpassword = "password12" - node_count = 3 - node_memory_allocation_mb = 1024 - node_disk_allocation_mb = 7168 - node_cpu_allocation_count = 3 service_endpoints = "public" tags = ["one:two"] - } + group { + group_id = "member" + members { + allocation_count = 2 + } + memory { + allocation_mb = 512 + } 
+ disk { + allocation_mb = 7168 + } + cpu { + allocation_count = 3 + } + } + } + `, databaseResourceGroup, name) +} + +func testAccCheckIBMDatabaseInstancePostgresGroupScaleOut(databaseResourceGroup string, name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + name = "%[1]s" + } + + resource "ibm_database" "%[2]s" { + resource_group_id = data.ibm_resource_group.test_acc.id + name = "%[2]s" + service = "databases-for-postgresql" + plan = "standard" + location = "us-south" + adminpassword = "password12" + group { + group_id = "member" + members { + allocation_count = 3 + } + memory { + allocation_mb = 1024 + } + disk { + allocation_mb = 7168 + } + cpu { + allocation_count = 3 + } + } + service_endpoints = "public" + tags = ["one:two"] + } `, databaseResourceGroup, name) } @@ -589,14 +849,14 @@ func testAccCheckIBMDatabaseInstancePostgresImport(databaseResourceGroup string, data "ibm_resource_group" "test_acc" { is_default = true # name = "%[1]s" - } - - resource "ibm_database" "%[2]s" { + } + + resource "ibm_database" "%[2]s" { resource_group_id = data.ibm_resource_group.test_acc.id name = "%[2]s" service = "databases-for-postgresql" plan = "standard" location = "us-south" - } + } `, databaseResourceGroup, name) } diff --git a/ibm/service/database/resource_ibm_database_redis_test.go b/ibm/service/database/resource_ibm_database_redis_test.go index 4f55b55a19..ace2b60c6e 100644 --- a/ibm/service/database/resource_ibm_database_redis_test.go +++ b/ibm/service/database/resource_ibm_database_redis_test.go @@ -52,7 +52,7 @@ func TestAccIBMDatabaseInstance_Redis_Basic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-redis"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", "us-south"), - resource.TestCheckResourceAttr(name, "members_memory_allocation_mb", "4096"), + resource.TestCheckResourceAttr(name, "members_memory_allocation_mb", "2304"), 
resource.TestCheckResourceAttr(name, "members_disk_allocation_mb", "4096"), resource.TestCheckResourceAttr(name, "whitelist.#", "2"), ), @@ -80,7 +80,6 @@ func TestAccIBMDatabaseInstanceRedisImport(t *testing.T) { databaseResourceGroup := "default" var databaseInstanceOne string serviceName := fmt.Sprintf("tf-redis-%d", acctest.RandIntRange(10, 100)) - //serviceName := "test_acc" resourceName := "ibm_database." + serviceName resource.Test(t, resource.TestCase{ @@ -183,7 +182,7 @@ func testAccCheckIBMDatabaseInstanceRedisFullyspecified(databaseResourceGroup st plan = "standard" location = "us-south" adminpassword = "password12" - members_memory_allocation_mb = 4096 + members_memory_allocation_mb = 2304 members_disk_allocation_mb = 4096 whitelist { address = "172.168.1.2/32" @@ -292,6 +291,11 @@ func testAccCheckIBMDatabaseInstanceRedisKPEncrypt(databaseResourceGroup string, key_protect_instance = ibm_resource_instance.kp_instance.guid key_protect_key = ibm_kp_key.test.id backup_encryption_key_crn = ibm_kp_key.test1.id + timeouts { + create = "480m" + update = "480m" + delete = "15m" + } } `, databaseResourceGroup, kpInstanceName, kpKeyName, kpByokName, name) } diff --git a/website/docs/r/database.html.markdown b/website/docs/r/database.html.markdown index b114696132..a2ff5acd52 100644 --- a/website/docs/r/database.html.markdown +++ b/website/docs/r/database.html.markdown @@ -8,9 +8,9 @@ description: |- # ibm_database -Create, update, or delete a IBM Cloud Database (ICD) instance. The `ibmcloud_api_key` that are used by Terraform should grant IAM rights to create and modify IBM Cloud Databases and have access to the resource group the ICD instance is associated with. For more information, see [documentation](https://cloud.ibm.com/docs/services/databases-for-postgresql/reference-access-management.html#identity-and-access-management) to manage ICD instances. +Create, update, or delete a IBM Cloud Database (ICD) instance. 
The `ibmcloud_api_key` that is used by Terraform should grant IAM rights to create and modify IBM Cloud Databases and have access to the resource group the ICD instance is associated with. For more information, see [documentation](https://cloud.ibm.com/docs/services/databases-for-postgresql/reference-access-management.html#identity-and-access-management) to manage ICD instances.
 
-If `resource_group_id` is not specified, the ICD instance is created in the default resource group. The `API_KEY` must be assigned permissions for this group. 
+If `resource_group_id` is not specified, the ICD instance is created in the default resource group. The `API_KEY` must be assigned permissions for this group.
 
 Configuration of an ICD resource requires that the `region` parameter is set for the IBM provider in the `provider.tf` to be the same as the target ICD `location/region`. If not specified it default to `us-south`. A `terraform apply` fails if the ICD `location` is set differently. If the Terraform configuration needs to deploy resources into multiple regions, provider alias can be used. For more information, see [Terraform provider configuration](https://www.terraform.io/docs/configuration/providers.html#multiple-provider-instances).
 
@@ -86,6 +86,57 @@ output "ICD Etcd database connection string" {
 ```
 
+### Sample database instance by using `group` attributes
+An example to configure and deploy a database by using `group` attributes.
+ +```terraform +data "ibm_resource_group" "group" { + name = "" +} + +resource "ibm_database" "" { + name = "" + plan = "standard" + location = "eu-gb" + service = "databases-for-etcd" + resource_group_id = data.ibm_resource_group.group.id + tags = ["tag1", "tag2"] + + adminpassword = "password12" + + group { + group_id = "member" + + memory { + allocation_mb = 1024 + } + + disk { + allocation_mb = 5120 + } + + cpu { + allocation_count = 3 + } + } + + users { + name = "user123" + password = "password12" + } + + whitelist { + address = "172.168.1.1/32" + description = "desc" + } +} + +output "ICD Etcd database connection string" { + value = "http://${ibm_database.test_acc.connectionstrings[0].composed}" +} + +``` + ### Sample database instance by using `point_in_time_recovery` An example for configuring `point_in_time_recovery` time by using `ibm_database` resource. @@ -268,7 +319,7 @@ resource "ibm_database" "db" { "max_connections": 400 } CONFIGURATION -} +} ``` @@ -287,7 +338,7 @@ For more information, about an example that are related to a VSI configuration t ## Timeouts -The following timeouts are defined for this resource. +The following timeouts are defined for this resource. * `Create` The creation of an instance is considered failed when no response is received for 60 minutes. * `Update` The update of an instance is considered failed when no response is received for 20 minutes. @@ -297,21 +348,21 @@ ICD create instance typically takes between 30 minutes to 45 minutes. Delete and ## Argument reference -Review the argument reference that you can specify for your resource. +Review the argument reference that you can specify for your resource. - `adminpassword` - (Optional, String) The password for the database administrator. If not specified, an empty string is provided for the password and the user ID cannot be used. In this case, more users must be specified in a `user` block. 
- `auto_scaling` (List , Optional) Configure rules to allow your database to automatically increase its resources. Single block of autoscaling is allowed at once. - + Nested scheme for `auto_scaling`: - `cpu` (List , Optional) Single block of CPU is allowed at once by CPU autoscaling. - + Nested scheme for `cpu`: - `rate_increase_percent` - (Optional, Integer) Auto scaling rate in increase percent. - `rate_limit_count_per_member` - (Optional, Integer) Auto scaling rate limit in count per number. - `rate_period_seconds` - (Optional, Integer) Period seconds of the auto scaling rate. - `rate_units` - (Optional, String) Auto scaling rate in units. - `disk` (List , Optional) Single block of disk is allowed at once in disk auto scaling. - + Nested scheme for `disk`: - `capacity_enabled` - (Optional, Bool) Auto scaling scalar enables or disables the scalar capacity. - `free_space_less_than_percent` - (Optional, Integer) Auto scaling scalar capacity free space less than percent. @@ -322,7 +373,7 @@ Review the argument reference that you can specify for your resource. - `rate_period_seconds` - (Optional, Integer) Auto scaling rate period in seconds. - `rate_units` - (Optional, String) Auto scaling rate in units. - `memory` (List , Optional) Memory Auto Scaling in single block of memory is allowed at once. - + Nested scheme for `memory`: - `io_above_percent` - (Optional, Integer) Auto scaling scalar I/O utilization above percent. - `io_enabled`-Bool-Optional-Auto scaling scalar I/O utilization enabled. @@ -338,6 +389,31 @@ Review the argument reference that you can specify for your resource. - `key_protect_key` - (Optional, Forces new resource, String) The root key CRN of a Key Management Services like Key Protect or Hyper Protect Crypto Service (HPCS) that you want to use for disk encryption. A key CRN is in the format `crn:v1:<…>:key:`. You can specify the root key during the database creation only. After the database is created, you cannot update the root key. 
For more information, refer [Disk encryption](https://cloud.ibm.com/docs/cloud-databases?topic=cloud-databases-key-protect#using-the-key-protect-key) documentation. - `key_protect_instance` - (Optional, Forces new resource, String) The instance CRN of a Key Management Services like Key Protect or Hyper Protect Crypto Service (HPCS) that you want to use for disk encryption. An instance CRN is in the format `crn:v1:<…>::`. - `location` - (Required, String) The location where you want to deploy your instance. The location must match the `region` parameter that you specify in the `provider` block of your Terraform configuration file. The default value is `us-south`. Currently, supported regions are `us-south`, `us-east`, `eu-gb`, `eu-de`, `au-syd`, `jp-tok`, `oslo01`. +- `group` - (Optional, Set) A set of group scaling values for the database. Multiple blocks are allowed. Can only be performed on is_adjustable=true groups. Values set are per-member. Values must be greater than or equal to the minimum size and must be a multiple of the step size. + + Nested scheme for `group`: + - `group_id` - (Optional, String) The ID of the scaling group. + + - `members` (Set, Optional) + + Nested scheme for `members`: + - `allocation_count` - (Optional, Integer) Allocated number of members. + + - `memory` (Set, Optional) Memory Auto Scaling in single block of memory is allowed at once. + + Nested scheme for `memory`: + - `allocation_mb` - (Optional, Integer) Allocated memory per-member. + + - `disk` (Set, Optional) + + Nested scheme for `disk`: + - `allocation_mb` - (Optional, Integer) Allocated disk per-member. + + - `cpu` (Set, Optional) + + Nested scheme for `cpu`: + - `allocation_count` - (Optional, Integer) Allocated dedicated CPU per-member. + - `members_memory_allocation_mb` - (Optional, Integer) The amount of memory in megabytes for the database, split across all members. If not specified, the default setting of the database service is used, which can vary by database type. 
- `members_disk_allocation_mb` - (Optional, Integer) The amount of disk space for the database, split across all members. If not specified, the default setting of the database service is used, which can vary by database type. - `members_cpu_allocation_count` - (Optional, Integer) Enables and allocates the number of specified dedicated cores to your deployment. @@ -346,7 +422,7 @@ Review the argument reference that you can specify for your resource. - `node_disk_allocation_mb` - (Optional, Integer) The disk size of the database per node. As above. - `node_memory_allocation_mb` - (Optional,Integer) The memory size for the database per node. If not specified defaults to the database default. These vary by database type. See the documentation related to each database for the defaults. https://cloud.ibm.com/docs/services/databases-for-postgresql/howto-provisioning.html#list-of-additional-parameters - ~> **Note:** `members_memory_allocation_mb`, `members_disk_allocation_mb`, `members_cpu_allocation_count` conflicts with `node_count`,`node_cpu_allocation_count`, `node_disk_allocation_mb`, `node_memory_allocation_mb` Either members or node arguments has to be provided + ~> **Note:** `members_memory_allocation_mb`, `members_disk_allocation_mb`, `members_cpu_allocation_count` conflicts with `node_count`,`node_cpu_allocation_count`, `node_disk_allocation_mb`, `node_memory_allocation_mb`. `group` conflicts with `node_` and `members_` arguments. Either members, node, or group arguments have to be provided. - `name` - (Required, String) A descriptive name that is used to identify the database instance. The name must not include spaces. - `plan` - (Required, Forces new resource, String) The name of the service plan that you choose for your instance. All databases use `standard`. 
`enterprise` is supported only for cassandra (`databases-for-cassandra`) and mongodb(`databases-for-mongodb`) * `plan_validation` - (Optional, bool) Enable or disable validating the database parameters for elasticsearch and postgres (more coming soon) during the plan phase. If not specified defaults to true. @@ -354,7 +430,7 @@ Review the argument reference that you can specify for your resource. - `point_in_time_recovery_time` - (Optional, String) The timestamp in UTC format that you want to restore to. To retrieve the timestamp, run the `ibmcloud cdb postgresql earliest-pitr-timestamp ` command. For more information, see [Point-in-time Recovery](https://cloud.ibm.com/docs/databases-for-postgresql?topic=databases-for-postgresql-pitr). - `remote_leader_id` - (Optional, String) A CRN of the leader database to make the replica(read-only) deployment. The leader database is created by a database deployment with the same service ID. A read-only replica is set up to replicate all of your data from the leader deployment to the replica deployment by using asynchronous replication. For more information, see [Configuring Read-only Replicas](https://cloud.ibm.com/docs/databases-for-postgresql?topic=databases-for-postgresql-read-only-replicas). - `resource_group_id` - (Optional, Forces new resource, String) The ID of the resource group where you want to create the instance. To retrieve this value, run `ibmcloud resource groups` or use the `ibm_resource_group` data source. If no value is provided, the `default` resource group is used. -- `service` - (Required, String) The type of Cloud Databases that you want to create. Only the following services are currently accepted: `databases-for-etcd`, `databases-for-postgresql`, `databases-for-redis`, `databases-for-elasticsearch`, `messages-for-rabbitmq`,`databases-for-mongodb`,`databases-for-mysql`, `databases-for-cassandra` and `databases-for-enterprisedb`. 
+- `service` - (Required, Forces new resource, String) The type of Cloud Databases that you want to create. Only the following services are currently accepted: `databases-for-etcd`, `databases-for-postgresql`, `databases-for-redis`, `databases-for-elasticsearch`, `messages-for-rabbitmq`,`databases-for-mongodb`,`databases-for-mysql`, `databases-for-cassandra` and `databases-for-enterprisedb`. - `service_endpoints` - (Optional, String) Specify whether you want to enable the public, private, or both service endpoints. Supported values are `public`, `private`, or `public-and-private`. The default is `public`. - `tags` (Optional, Array of Strings) A list of tags that you want to add to your instance. - `version` - (Optional, Forces new resource, String) The version of the database to be provisioned. If omitted, the database is created with the most recent major and minor version. @@ -364,14 +440,14 @@ Review the argument reference that you can specify for your resource. - `name` - (Optional, String) The user ID to add to the database instance. The user ID must be in the range 5 - 32 characters. - `password` - (Optional, String) The password for the user ID. The password must be in the range 10 - 32 characters. - `whitelist` - (Optional, List of Objects) A list of allowed IP addresses for the database. Multiple blocks are allowed. - + Nested scheme for `whitelist`: - `address` - (Optional, String) The IP address or range of database client addresses to be whitelisted in CIDR format. Example, `172.168.1.2/32`. - `description` - (Optional, String) A description for the allowed IP addresses range. ## Attribute reference -In addition to all argument references list, you can access the following attribute references after your resource is created. +In addition to all argument references list, you can access the following attribute references after your resource is created. - `adminuser` - (String) The user ID of the database administrator. Example, `admin` or `root`. 
- `configuration_schema` (String) Database Configuration Schema in JSON format.