Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

kubernetes_cluster kubernetes_cluster_node_pool - message_of_the_day scale_down_mode workload_runtime load_balancer_profile/managed_outbound_ipv6_count #16741

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package containers

import (
"encoding/base64"
"fmt"
"log"
"time"
Expand Down Expand Up @@ -155,6 +156,13 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
ForceNew: true,
},

"message_of_the_day": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this be ForceNew since we're checking for changes and updating the value below?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It can't be updated, so I'll remove the code that checks for changes and updates the value.

ValidateFunc: validation.StringIsNotEmpty,
},

"mode": {
Type: pluginsdk.TypeString,
Optional: true,
Expand Down Expand Up @@ -454,6 +462,14 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
profile.NodeTaints = nodeTaints
}

if v := d.Get("message_of_the_day").(string); v != "" {
if profile.OsType == containerservice.OSTypeWindows {
return fmt.Errorf("`message_of_the_day` cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script)")
}
messageOfTheDayEncoded := base64.StdEncoding.EncodeToString([]byte(v))
profile.MessageOfTheDay = &messageOfTheDayEncoded
}

if osDiskSizeGB := d.Get("os_disk_size_gb").(int); osDiskSizeGB > 0 {
profile.OsDiskSizeGB = utils.Int32(int32(osDiskSizeGB))
}
Expand Down Expand Up @@ -772,6 +788,16 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter
}
d.Set("max_count", maxCount)

messageOfTheDay := ""
if props.MessageOfTheDay != nil {
messageOfTheDayDecoded, err := base64.StdEncoding.DecodeString(*props.MessageOfTheDay)
if err != nil {
return fmt.Errorf("setting `message_of_the_day`: %+v", err)
}
messageOfTheDay = string(messageOfTheDayDecoded)
}
d.Set("message_of_the_day", messageOfTheDay)

maxPods := 0
if props.MaxPods != nil {
maxPods = int(*props.MaxPods)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1996,6 +1996,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
node_count = 3
fips_enabled = true
kubelet_disk_type = "OS"
message_of_the_day = "daily message"
}
`, r.templateConfig(data))
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,28 @@ func TestAccKubernetesCluster_upgrade(t *testing.T) {
})
}

func TestAccKubernetesCluster_scaleDownMode(t *testing.T) {
	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
	r := KubernetesClusterResource{}

	// Apply the cluster with each supported scale-down mode in turn,
	// checking it exists in Azure and imports cleanly after every apply.
	var steps []acceptance.TestStep
	for _, mode := range []string{"Delete", "Deallocate"} {
		steps = append(steps,
			acceptance.TestStep{
				Config: r.scaleDownMode(data, mode),
				Check: acceptance.ComposeTestCheckFunc(
					check.That(data.ResourceName).ExistsInAzure(r),
				),
			},
			data.ImportStep(),
		)
	}

	data.ResourceTest(t, r, steps)
}

func TestAccKubernetesCluster_tags(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
r := KubernetesClusterResource{}
Expand Down Expand Up @@ -1197,11 +1219,13 @@ resource "azurerm_kubernetes_cluster" "test" {
dns_prefix = "acctestaks%d"

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
fips_enabled = true
kubelet_disk_type = "OS"
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
fips_enabled = true
kubelet_disk_type = "OS"
message_of_the_day = "daily message"
workload_runtime = "OCIContainer"
}

identity {
Expand Down Expand Up @@ -1890,6 +1914,37 @@ resource "azurerm_kubernetes_cluster" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, ultraSSDEnabled)
}

// scaleDownMode renders a minimal cluster configuration whose default node
// pool uses the supplied scale-down mode ("Delete" or "Deallocate").
func (KubernetesClusterResource) scaleDownMode(data acceptance.TestData, scaleDownMode string) string {
	template := `
provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "test" {
  name     = "acctestRG-aks-%d"
  location = "%s"
}

resource "azurerm_kubernetes_cluster" "test" {
  name                = "acctestaks%d"
  location            = azurerm_resource_group.test.location
  resource_group_name = azurerm_resource_group.test.name
  dns_prefix          = "acctestaks%d"

  default_node_pool {
    name            = "default"
    node_count      = 1
    vm_size         = "Standard_DS2_v2"
    scale_down_mode = "%s"
  }

  identity {
    type = "SystemAssigned"
  }
}
`
	return fmt.Sprintf(template, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, scaleDownMode)
}

func (KubernetesClusterResource) privateClusterPublicFqdn(data acceptance.TestData, privateClusterPublicFqdnEnabled bool) string {
return fmt.Sprintf(`
provider "azurerm" {
Expand Down
32 changes: 32 additions & 0 deletions internal/services/containers/kubernetes_cluster_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -563,6 +563,13 @@ func resourceKubernetesCluster() *pluginsdk.Resource {
ValidateFunc: validation.IntBetween(1, 100),
ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"},
},
"managed_outbound_ipv6_count": {
Type: pluginsdk.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntBetween(1, 100),
ConflictsWith: []string{"network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids", "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"},
},
"outbound_ip_prefix_ids": {
Type: pluginsdk.TypeSet,
Optional: true,
Expand Down Expand Up @@ -1485,6 +1492,18 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}
loadBalancerProfile.OutboundIPPrefixes = nil
}

if key := "network_profile.0.load_balancer_profile.0.managed_outbound_ipv6_count"; d.HasChange(key) {
managedOutboundIPV6Count := d.Get(key).(int)
if loadBalancerProfile.ManagedOutboundIPs == nil {
loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{}
}
loadBalancerProfile.ManagedOutboundIPs.CountIPv6 = utils.Int32(int32(managedOutboundIPV6Count))

// fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.
loadBalancerProfile.OutboundIPs = nil
loadBalancerProfile.OutboundIPPrefixes = nil
}

if key := "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"; d.HasChange(key) {
outboundIPAddress := d.Get(key)
if v := outboundIPAddress.(*pluginsdk.Set).List(); len(v) == 0 {
Expand Down Expand Up @@ -2310,6 +2329,15 @@ func expandLoadBalancerProfile(d []interface{}) *containerservice.ManagedCluster
}
}

if ipv6Count := config["managed_outbound_ipv6_count"]; ipv6Count != nil {
if c := int32(ipv6Count.(int)); c > 0 {
if profile.ManagedOutboundIPs == nil {
profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{}
}
profile.ManagedOutboundIPs.CountIPv6 = &c
}
}

if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil {
profile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes}
}
Expand Down Expand Up @@ -2441,6 +2469,10 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro
if count := ips.Count; count != nil {
lb["managed_outbound_ip_count"] = count
}

if countIPv6 := ips.CountIPv6; countIPv6 != nil {
lb["managed_outbound_ipv6_count"] = countIPv6
}
}

if oip := lbp.OutboundIPs; oip != nil {
Expand Down
67 changes: 67 additions & 0 deletions internal/services/containers/kubernetes_nodepool.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package containers

import (
"encoding/base64"
"fmt"
"regexp"
"strconv"
Expand Down Expand Up @@ -117,6 +118,13 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
ForceNew: true,
},

"message_of_the_day": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringIsNotEmpty,
},

"min_count": {
Type: pluginsdk.TypeInt,
Optional: true,
Expand Down Expand Up @@ -226,6 +234,17 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
ForceNew: true,
},

"scale_down_mode": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
Default: string(containerservice.ScaleDownModeDelete),
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.ScaleDownModeDeallocate),
string(containerservice.ScaleDownModeDelete),
}, false),
},

"host_group_id": {
Type: pluginsdk.TypeString,
Optional: true,
Expand All @@ -234,6 +253,15 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
},

"upgrade_settings": upgradeSettingsSchema(),

"workload_runtime": {
Type: pluginsdk.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.WorkloadRuntimeOCIContainer),
}, false),
},
}

s["zones"] = commonschema.ZonesMultipleOptionalForceNew()
Expand Down Expand Up @@ -605,6 +633,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
MaxPods: defaultCluster.MaxPods,
OsType: defaultCluster.OsType,
MaxCount: defaultCluster.MaxCount,
MessageOfTheDay: defaultCluster.MessageOfTheDay,
MinCount: defaultCluster.MinCount,
EnableAutoScaling: defaultCluster.EnableAutoScaling,
EnableFIPS: defaultCluster.EnableFIPS,
Expand All @@ -622,8 +651,10 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
NodeLabels: defaultCluster.NodeLabels,
NodeTaints: defaultCluster.NodeTaints,
PodSubnetID: defaultCluster.PodSubnetID,
ScaleDownMode: defaultCluster.ScaleDownMode,
Tags: defaultCluster.Tags,
UpgradeSettings: defaultCluster.UpgradeSettings,
WorkloadRuntime: defaultCluster.WorkloadRuntime,
},
}
}
Expand Down Expand Up @@ -688,6 +719,11 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
profile.MaxPods = utils.Int32(maxPods)
}

if v := raw["message_of_the_day"].(string); v != "" {
messageOfTheDayEncoded := base64.StdEncoding.EncodeToString([]byte(v))
profile.MessageOfTheDay = &messageOfTheDayEncoded
}

if prefixID := raw["node_public_ip_prefix_id"].(string); prefixID != "" {
profile.NodePublicIPPrefixID = utils.String(prefixID)
}
Expand All @@ -709,6 +745,11 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
profile.PodSubnetID = utils.String(podSubnetID)
}

profile.ScaleDownMode = containerservice.ScaleDownModeDelete
if scaleDownMode := raw["scale_down_mode"].(string); scaleDownMode != "" {
profile.ScaleDownMode = containerservice.ScaleDownMode(scaleDownMode)
}

if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok {
profile.EnableUltraSSD = utils.Bool(ultraSSDEnabled.(bool))
}
Expand All @@ -729,6 +770,10 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
profile.ProximityPlacementGroupID = utils.String(proximityPlacementGroupId)
}

if workloadRunTime := raw["workload_runtime"].(string); workloadRunTime != "" {
profile.WorkloadRuntime = containerservice.WorkloadRuntime(workloadRunTime)
}

if capacityReservationGroupId := raw["capacity_reservation_group_id"].(string); capacityReservationGroupId != "" {
profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId)
}
Expand Down Expand Up @@ -1013,6 +1058,15 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
maxPods = int(*agentPool.MaxPods)
}

messageOfTheDay := ""
if agentPool.MessageOfTheDay != nil {
messageOfTheDayDecoded, err := base64.StdEncoding.DecodeString(*agentPool.MessageOfTheDay)
if err != nil {
return nil, err
}
messageOfTheDay = string(messageOfTheDayDecoded)
}

minCount := 0
if agentPool.MinCount != nil {
minCount = int(*agentPool.MinCount)
Expand Down Expand Up @@ -1080,6 +1134,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
proximityPlacementGroupId = *agentPool.ProximityPlacementGroupID
}

scaleDownMode := containerservice.ScaleDownModeDelete
if agentPool.ScaleDownMode != "" {
scaleDownMode = agentPool.ScaleDownMode
}

vmSize := ""
if agentPool.VMSize != nil {
vmSize = *agentPool.VMSize
Expand All @@ -1089,6 +1148,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
capacityReservationGroupId = *agentPool.CapacityReservationGroupID
}

workloadRunTime := ""
if agentPool.WorkloadRuntime != "" {
workloadRunTime = string(agentPool.WorkloadRuntime)
}

upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings)
linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(agentPool.LinuxOSConfig)
if err != nil {
Expand All @@ -1104,6 +1168,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
"kubelet_disk_type": string(agentPool.KubeletDiskType),
"max_count": maxCount,
"max_pods": maxPods,
"message_of_the_day": messageOfTheDay,
"min_count": minCount,
"name": name,
"node_count": count,
Expand All @@ -1113,10 +1178,12 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
"os_disk_size_gb": osDiskSizeGB,
"os_disk_type": string(osDiskType),
"os_sku": string(agentPool.OsSKU),
"scale_down_mode": string(scaleDownMode),
"tags": tags.Flatten(agentPool.Tags),
"type": string(agentPool.Type),
"ultra_ssd_enabled": enableUltraSSD,
"vm_size": vmSize,
"workload_runtime": workloadRunTime,
"pod_subnet_id": podSubnetId,
"orchestrator_version": orchestratorVersion,
"proximity_placement_group_id": proximityPlacementGroupId,
Expand Down
Loading