azurerm_batch_pool: Support for identity #13779

Merged 1 commit on Oct 21, 2021
70 changes: 70 additions & 0 deletions internal/services/batch/batch_pool_resource.go
@@ -13,15 +13,19 @@ import (
"github.com/hashicorp/terraform-provider-azurerm/helpers/azure"
"github.com/hashicorp/terraform-provider-azurerm/helpers/tf"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/identity"
"github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/parse"
"github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/validate"
msiparse "github.com/hashicorp/terraform-provider-azurerm/internal/services/msi/parse"
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress"
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation"
"github.com/hashicorp/terraform-provider-azurerm/internal/timeouts"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)

type batchPoolIdentity = identity.UserAssigned

func resourceBatchPool() *pluginsdk.Resource {
return &pluginsdk.Resource{
Create: resourceBatchPoolCreate,
@@ -282,6 +286,9 @@ func resourceBatchPool() *pluginsdk.Resource {
},
},
},

"identity": batchPoolIdentity{}.Schema(),

"start_task": {
Type: pluginsdk.TypeList,
Optional: true,
@@ -545,6 +552,12 @@ func resourceBatchPoolCreate(d *pluginsdk.ResourceData, meta interface{}) error
},
}

identity, err := expandBatchPoolIdentity(d.Get("identity").([]interface{}))
if err != nil {
return fmt.Errorf(`expanding "identity": %v`, err)
}
parameters.Identity = identity

scaleSettings, err := expandBatchPoolScaleSettings(d)
if err != nil {
return fmt.Errorf("expanding scale settings: %+v", err)
@@ -684,6 +697,12 @@ func resourceBatchPoolUpdate(d *pluginsdk.ResourceData, meta interface{}) error
PoolProperties: &batch.PoolProperties{},
}

identity, err := expandBatchPoolIdentity(d.Get("identity").([]interface{}))
if err != nil {
return fmt.Errorf(`expanding "identity": %v`, err)
}
parameters.Identity = identity

scaleSettings, err := expandBatchPoolScaleSettings(d)
if err != nil {
return fmt.Errorf("expanding scale settings: %+v", err)
@@ -762,6 +781,14 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error {
d.Set("account_name", id.BatchAccountName)
d.Set("resource_group_name", id.ResourceGroup)

identity, err := flattenBatchPoolIdentity(resp.Identity)
if err != nil {
return err
}
if err := d.Set("identity", identity); err != nil {
return fmt.Errorf("setting `identity`: %+v", err)
}

if props := resp.PoolProperties; props != nil {
d.Set("display_name", props.DisplayName)
d.Set("vm_size", props.VMSize)
@@ -956,3 +983,46 @@ func validateBatchPoolCrossFieldRules(pool *batch.Pool) error {

return nil
}

func expandBatchPoolIdentity(input []interface{}) (*batch.PoolIdentity, error) {
config, err := batchPoolIdentity{}.Expand(input)
if err != nil {
return nil, err
}

var identityMaps map[string]*batch.UserAssignedIdentities
if len(config.UserAssignedIdentityIds) != 0 {
identityMaps = make(map[string]*batch.UserAssignedIdentities, len(config.UserAssignedIdentityIds))
for _, id := range config.UserAssignedIdentityIds {
identityMaps[id] = &batch.UserAssignedIdentities{}
}
}

return &batch.PoolIdentity{
Type: batch.PoolIdentityType(config.Type),
UserAssignedIdentities: identityMaps,
}, nil
}

func flattenBatchPoolIdentity(input *batch.PoolIdentity) ([]interface{}, error) {
var config *identity.ExpandedConfig

if input == nil {
return []interface{}{}, nil
}

var identityIds []string
for id := range input.UserAssignedIdentities {
parsedId, err := msiparse.UserAssignedIdentityIDInsensitively(id)
if err != nil {
return nil, err
}
identityIds = append(identityIds, parsedId.ID())
}

config = &identity.ExpandedConfig{
Type: identity.Type(string(input.Type)),
UserAssignedIdentityIds: identityIds,
}
return batchPoolIdentity{}.Flatten(config), nil
}
110 changes: 110 additions & 0 deletions internal/services/batch/batch_pool_resource_test.go
@@ -42,6 +42,68 @@ func TestAccBatchPool_basic(t *testing.T) {
})
}

func TestAccBatchPool_identity(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.identity(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep("stop_pending_resize_operation"),
})
}

func TestAccBatchPool_identityUpdate(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.basic(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("vm_size").HasValue("STANDARD_A1"),
check.That(data.ResourceName).Key("node_agent_sku_id").HasValue("batch.node.ubuntu 16.04"),
check.That(data.ResourceName).Key("storage_image_reference.#").HasValue("1"),
check.That(data.ResourceName).Key("storage_image_reference.0.publisher").HasValue("Canonical"),
check.That(data.ResourceName).Key("storage_image_reference.0.sku").HasValue("16.04.0-LTS"),
check.That(data.ResourceName).Key("storage_image_reference.0.offer").HasValue("UbuntuServer"),
check.That(data.ResourceName).Key("fixed_scale.#").HasValue("1"),
check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("1"),
check.That(data.ResourceName).Key("start_task.#").HasValue("0"),
),
},
data.ImportStep("stop_pending_resize_operation"),
{
Config: r.identity(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep("stop_pending_resize_operation"),
{
Config: r.basic(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("vm_size").HasValue("STANDARD_A1"),
check.That(data.ResourceName).Key("node_agent_sku_id").HasValue("batch.node.ubuntu 16.04"),
check.That(data.ResourceName).Key("storage_image_reference.#").HasValue("1"),
check.That(data.ResourceName).Key("storage_image_reference.0.publisher").HasValue("Canonical"),
check.That(data.ResourceName).Key("storage_image_reference.0.sku").HasValue("16.04.0-LTS"),
check.That(data.ResourceName).Key("storage_image_reference.0.offer").HasValue("UbuntuServer"),
check.That(data.ResourceName).Key("fixed_scale.#").HasValue("1"),
check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("1"),
check.That(data.ResourceName).Key("start_task.#").HasValue("0"),
),
},
data.ImportStep("stop_pending_resize_operation"),
})
}

func TestAccBatchPool_requiresImport(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}
@@ -608,6 +670,54 @@ resource "azurerm_batch_pool" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString)
}

func (BatchPoolResource) identity(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "testaccRG-batch-%d"
location = "%s"
}

resource "azurerm_user_assigned_identity" "test" {
name = "acctest%s"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}

resource "azurerm_batch_account" "test" {
name = "testaccbatch%s"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}

resource "azurerm_batch_pool" "test" {
name = "testaccpool%s"
resource_group_name = azurerm_resource_group.test.name
account_name = azurerm_batch_account.test.name
node_agent_sku_id = "batch.node.ubuntu 16.04"
vm_size = "Standard_A1"
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.test.id]
}

fixed_scale {
target_dedicated_nodes = 1
}

storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04.0-LTS"
version = "latest"
}
}
`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString, data.RandomString)
}

func (BatchPoolResource) requiresImport(data acceptance.TestData) string {
return fmt.Sprintf(`
%s
11 changes: 11 additions & 0 deletions website/docs/r/batch_pool.html.markdown
@@ -129,6 +129,8 @@ The following arguments are supported:

* `display_name` - (Optional) Specifies the display name of the Batch pool.

* `identity` - (Optional) An `identity` block as defined below.

* `max_tasks_per_node` - (Optional) Specifies the maximum number of tasks that can run concurrently on a single compute node in the pool. Defaults to `1`. Changing this forces a new resource to be created.

* `fixed_scale` - (Optional) A `fixed_scale` block that describes the scale settings when using fixed scale.
@@ -150,6 +152,15 @@ The following arguments are supported:
~> **Please Note:** The `fixed_scale` and `auto_scale` blocks cannot both be used at the same time.

---

An `identity` block supports the following:

* `type` - (Required) Specifies the identity type of the Batch Pool. The only possible value is `UserAssigned`.

* `identity_ids` - (Required) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Batch Pool.
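
An illustrative `identity` block, mirroring the configuration used in this PR's acceptance test (the `azurerm_user_assigned_identity.example` resource is assumed to be defined elsewhere in the configuration):

```hcl
identity {
  # Only user assigned identities are supported on Batch Pools.
  type         = "UserAssigned"
  identity_ids = [azurerm_user_assigned_identity.example.id]
}
```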

---

A `storage_image_reference` block supports the following:

This block provisions virtual machines in the Batch Pool from one of two sources: an Azure Platform Image (e.g. Ubuntu/Windows Server) or a Custom Image.
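
As a sketch of the Platform Image case, using the Ubuntu image from this PR's acceptance test:

```hcl
storage_image_reference {
  # Azure Platform Image: Ubuntu Server 16.04 LTS from the Marketplace.
  publisher = "Canonical"
  offer     = "UbuntuServer"
  sku       = "16.04.0-LTS"
  version   = "latest"
}
```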