diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md
index da007182cb..7f2463d4ee 100644
--- a/docs/clusterdefinition.md
+++ b/docs/clusterdefinition.md
@@ -60,7 +60,7 @@ To learn more about supported orchestrators and versions, run the orchestrators
| gcLowThreshold | no | Sets the --image-gc-low-threshold value on the kublet configuration. Default is 80. [See kubelet Garbage Collection](https://kubernetes.io/docs/concepts/cluster-administration/kubelet-garbage-collection/) |
| kubeletConfig | no | Configure various runtime configuration for kubelet. See `kubeletConfig` [below](#feat-kubelet-config) |
| kubernetesImageBase | no | Specifies the base URL (everything preceding the actual image filename) of the kubernetes hyperkube image to use for cluster deployment, e.g., `k8s.gcr.io/` |
-| loadBalancerSku | no | Sku of Load Balancer and Public IP. Candidate values are: `basic` and `standard`. If not set, it will be default to basic. Requires Kubernetes 1.11 or newer. NOTE: VMs behind ILB standard SKU will not be able to access the internet without ELB configured with at least one frontend IP as described in the [standard loadbalancer outbound connectivity doc](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-standard-overview#control-outbound-connectivity). For Kubernetes 1.11, We have created an external loadbalancer service in the kube-system namespace as a workaround to this issue. Starting k8s 1.12, instead of creating an ELB service, we will setup outbound rules in ARM template once the API is available. |
+| loadBalancerSku | no | Sku of Load Balancer and Public IP. Candidate values are: `basic` and `standard`. If not set, it will default to basic. Requires Kubernetes 1.11 or newer. NOTE: VMs behind ILB standard SKU will not be able to access the internet without ELB configured with at least one frontend IP as described in the [standard loadbalancer outbound connectivity doc](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-standard-overview#control-outbound-connectivity). For Kubernetes 1.11 and 1.12, we have created an external loadbalancer service in the kube-system namespace as a workaround to this issue. Starting with k8s 1.13, instead of creating an ELB service, we will set up outbound rules in the ARM template once the API is available. |
| networkPlugin | no | Specifies the network plugin implementation for the cluster. Valid values are:
`"azure"` (default), which provides an Azure native networking experience
`"kubenet"` for k8s software networking implementation.
`"flannel"` for using CoreOS Flannel
`"cilium"` for using the default Cilium CNI IPAM |
| networkPolicy | no | Specifies the network policy enforcement tool for the cluster (currently Linux-only). Valid values are:
`"calico"` for Calico network policy.
`"cilium"` for cilium network policy (Lin), and `"azure"` (experimental) for Azure CNI-compliant network policy (note: Azure CNI-compliant network policy requires explicit `"networkPlugin": "azure"` configuration as well).
See [network policy examples](../examples/networkpolicy) for more information. |
| privateCluster | no | Build a cluster without public addresses assigned. See `privateClusters` [below](#feat-private-cluster). |
@@ -508,7 +508,7 @@ A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for
| ---------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| availabilityProfile | no | Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`. |
| count | yes | Describes the node count |
-| availabilityZones | no | To protect your cluster from datacenter-level failures, you can provide Availability Zones for each of your agentPool. Only applies to Kubernetes clusters version 1.12+. Supported values are arrays of strings, each representing a supported availability zone in a region for your subscription. e.g. `"availabilityZones": ["1","2"]` represents zone 1 and zone 2 can be used. To get supported zones for a region in your subscription, run `az vm list-skus --location centralus --query "[?name=='Standard_DS2_v2'].[locationInfo, restrictions"] -o table`. You should see values like `'zones': ['2', '3', '1']` appear in the first column. If `NotAvailableForSubscription` appears in the output, then you need to create an Azure support ticket to enable zones for that region. Note: For availability zones, only standard load balancer is supported. ([Availability zone example](../examples/e2e-tests/kubernetes/zones)). |
+| availabilityZones | no | To protect your cluster from datacenter-level failures, you can provide Availability Zones for all the agentPools and master profiles in your cluster. Only applies to Kubernetes clusters version 1.12+. Supported values are arrays of strings, each representing a supported availability zone in a region for your subscription. e.g. `"availabilityZones": ["1","2"]` indicates that zone 1 and zone 2 can be used. To get supported zones for a region in your subscription, run `az vm list-skus --location centralus --query "[?name=='Standard_DS2_v2'].[locationInfo, restrictions]" -o table`. You should see values like `'zones': ['2', '3', '1']` appear in the first column. If `NotAvailableForSubscription` appears in the output, then you need to create an Azure support ticket to enable zones for that region. Note: For availability zones, only standard load balancer is supported. ([Availability zone example](../examples/e2e-tests/kubernetes/zones)). To ensure high availability, each profile must define at least two nodes per zone. e.g. An agent pool profile with `"availabilityZones": ["1","2"]` must have at least 4 nodes total with `"count": 4`. |
| singlePlacementGroup | no | Supported values are `true` (default) and `false`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. `true`: A VMSS with a single placement group and has a range of 0-100 VMs. `false`: A VMSS with multiple placement groups and has a range of 0-1,000 VMs. For more information, check out [virtual machine scale sets placement groups](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups). |
| scaleSetPriority | no | Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority). |
| scaleSetEvictionPolicy | no | Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`. |
diff --git a/examples/e2e-tests/kubernetes/zones/definition.json b/examples/e2e-tests/kubernetes/zones/definition.json
index 1b453998d3..573058c1d3 100644
--- a/examples/e2e-tests/kubernetes/zones/definition.json
+++ b/examples/e2e-tests/kubernetes/zones/definition.json
@@ -6,15 +6,21 @@
"orchestratorRelease": "1.12"
},
"masterProfile": {
- "count": 1,
+ "count": 5,
"dnsPrefix": "",
- "vmSize": "Standard_DS2_v2"
+ "vmSize": "Standard_DS2_v2",
+ "availabilityProfile": "VirtualMachineScaleSets",
+ "availabilityZones": [
+ "1",
+ "2"
+ ]
},
"agentPoolProfiles": [
{
"name": "agentpool",
"count": 4,
"vmSize": "Standard_DS2_v2",
+ "availabilityProfile": "VirtualMachineScaleSets",
"availabilityZones": [
"1",
"2"
diff --git a/parts/k8s/kubernetesmasterresourcesvmss.t b/parts/k8s/kubernetesmasterresourcesvmss.t
index b17142c514..eb9067ece5 100644
--- a/parts/k8s/kubernetesmasterresourcesvmss.t
+++ b/parts/k8s/kubernetesmasterresourcesvmss.t
@@ -186,14 +186,21 @@
},
{{end}}
{
- "apiVersion": "[variables('apiVersionDefault')]",
+ "apiVersion": "2017-08-01",
"location": "[variables('location')]",
"name": "[variables('masterPublicIPAddressName')]",
"properties": {
"dnsSettings": {
"domainNameLabel": "[variables('masterFqdnPrefix')]"
},
+ {{ if .MasterProfile.HasAvailabilityZones}}
+ "publicIPAllocationMethod": "Static"
+ {{else}}
"publicIPAllocationMethod": "Dynamic"
+ {{end}}
+ },
+ "sku": {
+ "name": "[variables('loadBalancerSku')]"
},
"type": "Microsoft.Network/publicIPAddresses"
},
@@ -201,7 +208,7 @@
"type": "Microsoft.Network/loadBalancers",
"name": "[variables('masterLbName')]",
"location": "[variables('location')]",
- "apiVersion": "[variables('apiVersionDefault')]",
+ "apiVersion": "2017-08-01",
"dependsOn": [
"[concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIPAddressName'))]"
],
@@ -269,6 +276,9 @@
}
}
]
+ },
+ "sku": {
+ "name": "[variables('loadBalancerSku')]"
}
},
{
@@ -290,6 +300,9 @@
"poolName": "master"
},
"location": "[variables('location')]",
+ {{ if .MasterProfile.HasAvailabilityZones}}
+ "zones": "[parameters('availabilityZones')]",
+ {{ end }}
"name": "[concat(variables('masterVMNamePrefix'), 'vmss')]",
{{if UseManagedIdentity}}
{{if UserAssignedIDEnabled}}
@@ -311,6 +324,7 @@
"name": "[parameters('masterVMSize')]"
},
"properties": {
+ "singlePlacementGroup": {{ .MasterProfile.SinglePlacementGroup}},
"overprovision": false,
"upgradePolicy": {
"mode": "Manual"
diff --git a/parts/masterparams.t b/parts/masterparams.t
index ea98751350..60deac3b33 100644
--- a/parts/masterparams.t
+++ b/parts/masterparams.t
@@ -56,6 +56,14 @@
"type": "string"
},
{{end}}
+ {{if .MasterProfile.HasAvailabilityZones}}
+ "availabilityZones": {
+ "metadata": {
+ "description": "Master availability zones"
+ },
+ "type": "array"
+ },
+ {{end}}
{{end}}
{{if IsHostedMaster}}
"masterSubnet": {
diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go
index cc2f88635e..65ad28f151 100644
--- a/pkg/acsengine/defaults.go
+++ b/pkg/acsengine/defaults.go
@@ -242,7 +242,7 @@ func setPropertiesDefaults(cs *api.ContainerService, isUpgrade, isScale bool) (b
setStorageDefaults(properties)
setExtensionDefaults(properties)
- setVMSSDefaults(properties)
+ setVMSSDefaultsForAgents(properties)
// Set hosted master profile defaults if this cluster configuration has a hosted control plane
if cs.Properties.HostedMasterProfile != nil {
@@ -582,10 +582,26 @@ func setMasterProfileDefaults(a *api.Properties, isUpgrade bool) {
if a.MasterProfile.HTTPSourceAddressPrefix == "" {
a.MasterProfile.HTTPSourceAddressPrefix = "*"
}
+ // Set VMSS Defaults for Masters
+ if a.MasterProfile.IsVirtualMachineScaleSets() {
+ if a.MasterProfile.Count > 100 {
+ a.MasterProfile.SinglePlacementGroup = helpers.PointerToBool(false)
+ }
+ if a.MasterProfile.SinglePlacementGroup == nil {
+ a.MasterProfile.SinglePlacementGroup = helpers.PointerToBool(api.DefaultSinglePlacementGroup)
+ }
+		if !*a.MasterProfile.SinglePlacementGroup {
+ a.MasterProfile.StorageProfile = api.ManagedDisks
+ }
+ if a.MasterProfile.HasAvailabilityZones() {
+ a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard"
+ a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB)
+ }
+ }
}
-// setVMSSDefaults
-func setVMSSDefaults(a *api.Properties) {
+// setVMSSDefaultsForAgents
+func setVMSSDefaultsForAgents(a *api.Properties) {
for _, profile := range a.AgentPoolProfiles {
if profile.AvailabilityProfile == api.VirtualMachineScaleSets {
if profile.Count > 100 {
diff --git a/pkg/acsengine/defaults_test.go b/pkg/acsengine/defaults_test.go
index b7c8395bc0..14a467346f 100644
--- a/pkg/acsengine/defaults_test.go
+++ b/pkg/acsengine/defaults_test.go
@@ -714,32 +714,86 @@ func TestIsAzureCNINetworkmonitorAddon(t *testing.T) {
}
}
-// TestSetVMSSDefaults covers tests for setVMSSDefaults
-func TestSetVMSSDefaults(t *testing.T) {
- mockCS := getMockBaseContainerService("1.10.3")
+// TestSetVMSSDefaultsAndZones covers tests for setVMSSDefaultsForAgents and masters
+func TestSetVMSSDefaultsAndZones(t *testing.T) {
+ // masters with vmss and no zones
+ mockCS := getMockBaseContainerService("1.12.0-beta.0")
properties := mockCS.Properties
properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
+ properties.MasterProfile.AvailabilityProfile = api.VirtualMachineScaleSets
+ setPropertiesDefaults(&mockCS, false, false)
+ if properties.MasterProfile.HasAvailabilityZones() {
+ t.Fatalf("MasterProfile.HasAvailabilityZones did not have the expected return, got %t, expected %t",
+ properties.MasterProfile.HasAvailabilityZones(), false)
+ }
+ if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != api.DefaultLoadBalancerSku {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
+ properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, api.DefaultLoadBalancerSku)
+ }
+ // masters with vmss and zones
+ mockCS = getMockBaseContainerService("1.12.0-beta.0")
+ properties = mockCS.Properties
+ properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
+ properties.MasterProfile.AvailabilityProfile = api.VirtualMachineScaleSets
+ properties.MasterProfile.AvailabilityZones = []string{"1", "2"}
+ setPropertiesDefaults(&mockCS, false, false)
+ if *properties.MasterProfile.SinglePlacementGroup != api.DefaultSinglePlacementGroup {
+ t.Fatalf("MasterProfile.SinglePlacementGroup default did not have the expected configuration, got %t, expected %t",
+ *properties.MasterProfile.SinglePlacementGroup, api.DefaultSinglePlacementGroup)
+ }
+ if !properties.MasterProfile.HasAvailabilityZones() {
+ t.Fatalf("MasterProfile.HasAvailabilityZones did not have the expected return, got %t, expected %t",
+ properties.MasterProfile.HasAvailabilityZones(), true)
+ }
+ if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
+ properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard")
+ }
+ if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t",
+ *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB)
+ }
+ // agents with vmss and no zones
+ mockCS = getMockBaseContainerService("1.12.0-beta.0")
+ properties = mockCS.Properties
+ properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
+ properties.AgentPoolProfiles[0].Count = 4
+ setPropertiesDefaults(&mockCS, false, false)
+ if properties.AgentPoolProfiles[0].HasAvailabilityZones() {
+ t.Fatalf("AgentPoolProfiles[0].HasAvailabilityZones did not have the expected return, got %t, expected %t",
+ properties.AgentPoolProfiles[0].HasAvailabilityZones(), false)
+ }
+ if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != api.DefaultLoadBalancerSku {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
+ properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, api.DefaultLoadBalancerSku)
+ }
+ // agents with vmss and zones
+ mockCS = getMockBaseContainerService("1.12.0-beta.0")
+ properties = mockCS.Properties
+ properties.OrchestratorProfile.OrchestratorType = "Kubernetes"
properties.AgentPoolProfiles[0].Count = 4
+ properties.AgentPoolProfiles[0].AvailabilityZones = []string{"1", "2"}
setPropertiesDefaults(&mockCS, false, false)
if !properties.AgentPoolProfiles[0].IsVirtualMachineScaleSets() {
t.Fatalf("AgentPoolProfile[0].AvailabilityProfile did not have the expected configuration, got %s, expected %s",
properties.AgentPoolProfiles[0].AvailabilityProfile, api.VirtualMachineScaleSets)
}
-
+ if !properties.AgentPoolProfiles[0].HasAvailabilityZones() {
+ t.Fatalf("AgentPoolProfiles[0].HasAvailabilityZones did not have the expected return, got %t, expected %t",
+ properties.AgentPoolProfiles[0].HasAvailabilityZones(), true)
+ }
if *properties.AgentPoolProfiles[0].SinglePlacementGroup != api.DefaultSinglePlacementGroup {
t.Fatalf("AgentPoolProfile[0].SinglePlacementGroup default did not have the expected configuration, got %t, expected %t",
*properties.AgentPoolProfiles[0].SinglePlacementGroup, api.DefaultSinglePlacementGroup)
}
- if properties.AgentPoolProfiles[0].HasAvailabilityZones() {
- if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" {
- t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
- properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard")
- }
- if properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB) {
- t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t",
- *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB)
- }
+ if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s",
+ properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard")
+ }
+ if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB {
+ t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t",
+ *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB)
}
properties.AgentPoolProfiles[0].Count = 110
diff --git a/pkg/acsengine/params.go b/pkg/acsengine/params.go
index 3e56f4152f..3e10ec75c8 100644
--- a/pkg/acsengine/params.go
+++ b/pkg/acsengine/params.go
@@ -69,6 +69,9 @@ func getParameters(cs *api.ContainerService, generatorCode string, acsengineVers
}
addValue(parametersMap, "firstConsecutiveStaticIP", properties.MasterProfile.FirstConsecutiveStaticIP)
addValue(parametersMap, "masterVMSize", properties.MasterProfile.VMSize)
+ if properties.MasterProfile.HasAvailabilityZones() {
+ addValue(parametersMap, "availabilityZones", properties.MasterProfile.AvailabilityZones)
+ }
}
if properties.HostedMasterProfile != nil {
addValue(parametersMap, "masterSubnet", properties.HostedMasterProfile.Subnet)
diff --git a/pkg/api/converterfromapi.go b/pkg/api/converterfromapi.go
index 0cd39deb46..6de9f03a44 100644
--- a/pkg/api/converterfromapi.go
+++ b/pkg/api/converterfromapi.go
@@ -923,6 +923,8 @@ func convertMasterProfileToVLabs(api *MasterProfile, vlabsProfile *vlabs.MasterP
}
vlabsProfile.AvailabilityProfile = api.AvailabilityProfile
vlabsProfile.AgentSubnet = api.AgentSubnet
+ vlabsProfile.AvailabilityZones = api.AvailabilityZones
+ vlabsProfile.SinglePlacementGroup = api.SinglePlacementGroup
convertCustomFilesToVlabs(api, vlabsProfile)
}
diff --git a/pkg/api/convertertoapi.go b/pkg/api/convertertoapi.go
index 4b82003a94..07da92e183 100644
--- a/pkg/api/convertertoapi.go
+++ b/pkg/api/convertertoapi.go
@@ -933,6 +933,8 @@ func convertVLabsMasterProfile(vlabs *vlabs.MasterProfile, api *MasterProfile) {
api.AvailabilityProfile = vlabs.AvailabilityProfile
api.AgentSubnet = vlabs.AgentSubnet
+ api.AvailabilityZones = vlabs.AvailabilityZones
+ api.SinglePlacementGroup = vlabs.SinglePlacementGroup
convertCustomFilesToAPI(vlabs, api)
}
diff --git a/pkg/api/types.go b/pkg/api/types.go
index 0cda7e4872..db6fe18ae0 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -411,6 +411,8 @@ type MasterProfile struct {
CustomFiles *[]CustomFile `json:"customFiles,omitempty"`
AvailabilityProfile string `json:"availabilityProfile"`
AgentSubnet string `json:"agentSubnet,omitempty"`
+ AvailabilityZones []string `json:"availabilityZones,omitempty"`
+ SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
// Master LB public endpoint/FQDN with port
// The format will be FQDN:2376
@@ -715,6 +717,17 @@ func (p *Properties) HasVirtualMachineScaleSets() bool {
return false
}
+// HasAllZonesAgentPools will return true if all of the agent pools have zones
+func (p *Properties) HasAllZonesAgentPools() bool {
+ count := 0
+ for _, ap := range p.AgentPoolProfiles {
+ if ap.HasAvailabilityZones() {
+ count++
+ }
+ }
+ return count == len(p.AgentPoolProfiles)
+}
+
// IsCustomVNET returns true if the customer brought their own VNET
func (m *MasterProfile) IsCustomVNET() bool {
return len(m.VnetSubnetID) > 0
@@ -774,6 +787,11 @@ func (m *MasterProfile) GetFirstConsecutiveStaticIPAddress(subnetStr string) str
return subnet.IP.String()
}
+// HasAvailabilityZones returns true if the master profile has availability zones
+func (m *MasterProfile) HasAvailabilityZones() bool {
+	return len(m.AvailabilityZones) > 0
+}
+
// IsCustomVNET returns true if the customer brought their own VNET
func (a *AgentPoolProfile) IsCustomVNET() bool {
return len(a.VnetSubnetID) > 0
diff --git a/pkg/api/types_test.go b/pkg/api/types_test.go
index 76b3011dcf..fd39f5a91c 100644
--- a/pkg/api/types_test.go
+++ b/pkg/api/types_test.go
@@ -547,6 +547,88 @@ func TestIsCustomVNET(t *testing.T) {
}
+func TestHasAvailabilityZones(t *testing.T) {
+ cases := []struct {
+ p Properties
+ expectedMaster bool
+ expectedAgent bool
+ expectedAllZones bool
+ }{
+ {
+ p: Properties{
+ MasterProfile: &MasterProfile{
+ Count: 1,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ AgentPoolProfiles: []*AgentPoolProfile{
+ {
+ Count: 1,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ {
+ Count: 1,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ },
+ expectedMaster: true,
+ expectedAgent: true,
+ expectedAllZones: true,
+ },
+ {
+ p: Properties{
+ MasterProfile: &MasterProfile{
+ Count: 1,
+ },
+ AgentPoolProfiles: []*AgentPoolProfile{
+ {
+ Count: 1,
+ },
+ {
+ Count: 1,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ },
+ expectedMaster: false,
+ expectedAgent: false,
+ expectedAllZones: false,
+ },
+ {
+ p: Properties{
+ MasterProfile: &MasterProfile{
+ Count: 1,
+ },
+ AgentPoolProfiles: []*AgentPoolProfile{
+ {
+ Count: 1,
+ AvailabilityZones: []string{},
+ },
+ {
+ Count: 1,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ },
+ expectedMaster: false,
+ expectedAgent: false,
+ expectedAllZones: false,
+ },
+ }
+
+ for _, c := range cases {
+ if c.p.MasterProfile.HasAvailabilityZones() != c.expectedMaster {
+ t.Fatalf("expected HasAvailabilityZones() to return %t but instead returned %t", c.expectedMaster, c.p.MasterProfile.HasAvailabilityZones())
+ }
+ if c.p.AgentPoolProfiles[0].HasAvailabilityZones() != c.expectedAgent {
+ t.Fatalf("expected HasAvailabilityZones() to return %t but instead returned %t", c.expectedAgent, c.p.AgentPoolProfiles[0].HasAvailabilityZones())
+ }
+ if c.p.HasAllZonesAgentPools() != c.expectedAllZones {
+ t.Fatalf("expected HasAllZonesAgentPools() to return %t but instead returned %t", c.expectedAllZones, c.p.HasAllZonesAgentPools())
+ }
+ }
+}
+
func TestRequireRouteTable(t *testing.T) {
cases := []struct {
p Properties
diff --git a/pkg/api/vlabs/types.go b/pkg/api/vlabs/types.go
index 09f67aad7b..d52b5f2a08 100644
--- a/pkg/api/vlabs/types.go
+++ b/pkg/api/vlabs/types.go
@@ -379,6 +379,8 @@ type MasterProfile struct {
CustomFiles *[]CustomFile `json:"customFiles,omitempty"`
AvailabilityProfile string `json:"availabilityProfile"`
AgentSubnet string `json:"agentSubnet,omitempty"`
+ AvailabilityZones []string `json:"availabilityZones,omitempty"`
+ SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"`
// subnet is internal
subnet string
@@ -507,12 +509,16 @@ func (p *Properties) HasWindows() bool {
// HasAvailabilityZones returns true if the cluster contains pools with zones
func (p *Properties) HasAvailabilityZones() bool {
- for _, agentPoolProfile := range p.AgentPoolProfiles {
- if agentPoolProfile.HasAvailabilityZones() {
- return true
+ hasZones := p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones()
+ if !hasZones && p.AgentPoolProfiles != nil {
+ for _, agentPoolProfile := range p.AgentPoolProfiles {
+ if agentPoolProfile.HasAvailabilityZones() {
+ hasZones = true
+ break
+ }
}
}
- return false
+ return hasZones
}
// IsCustomVNET returns true if the customer brought their own VNET
@@ -555,12 +561,31 @@ func (m *MasterProfile) IsVirtualMachineScaleSets() bool {
return m.AvailabilityProfile == VirtualMachineScaleSets
}
-// IsAllVirtualMachineScaleSets returns true if the cluster contains only Virtual Machine Scale Sets
-func (p *Properties) IsAllVirtualMachineScaleSets() bool {
- isAll := p.MasterProfile.IsVirtualMachineScaleSets()
- if isAll {
+// HasAvailabilityZones returns true if the master profile has availability zones
+func (m *MasterProfile) HasAvailabilityZones() bool {
+	return len(m.AvailabilityZones) > 0
+}
+
+// IsClusterAllAvailabilityZones returns true if the cluster contains AZs for all agents and masters profiles
+func (p *Properties) IsClusterAllAvailabilityZones() bool {
+ isAll := p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones()
+ if isAll && p.AgentPoolProfiles != nil {
+ for _, agentPoolProfile := range p.AgentPoolProfiles {
+ if !agentPoolProfile.HasAvailabilityZones() {
+ isAll = false
+ break
+ }
+ }
+ }
+ return isAll
+}
+
+// IsClusterAllVirtualMachineScaleSets returns true if the cluster contains only Virtual Machine Scale Sets
+func (p *Properties) IsClusterAllVirtualMachineScaleSets() bool {
+ isAll := p.MasterProfile != nil && p.MasterProfile.IsVirtualMachineScaleSets()
+ if isAll && p.AgentPoolProfiles != nil {
for _, agentPoolProfile := range p.AgentPoolProfiles {
- if agentPoolProfile.AvailabilityProfile != VirtualMachineScaleSets {
+ if agentPoolProfile.AvailabilityProfile == AvailabilitySet {
isAll = false
break
}
diff --git a/pkg/api/vlabs/types_test.go b/pkg/api/vlabs/types_test.go
index 33d396b49a..9e1688f800 100644
--- a/pkg/api/vlabs/types_test.go
+++ b/pkg/api/vlabs/types_test.go
@@ -120,8 +120,8 @@ func TestMasterProfile(t *testing.T) {
t.Fatalf("unexpectedly detected MasterProfile.AvailabilitySets == VirtualMachineScaleSets after unmarshal")
}
- // With vmss
- MasterProfileText = `{ "count": 3, "vmSize": "Standard_D2_v2", "availabilityProfile": "VirtualMachineScaleSets", "storageProfile" : "ManagedDisks", "diskSizesGB" : [750, 250, 600, 1000] }`
+ // With vmss and zones
+ MasterProfileText = `{ "count": 3, "vmSize": "Standard_D2_v2", "availabilityProfile": "VirtualMachineScaleSets", "storageProfile" : "ManagedDisks", "diskSizesGB" : [750, 250, 600, 1000], "AvailabilityZones": ["1","2"] }`
mp = &MasterProfile{}
if e := json.Unmarshal([]byte(MasterProfileText), mp); e != nil {
t.Fatalf("unexpectedly detected unmarshal failure for MasterProfile, %+v", e)
@@ -138,6 +138,10 @@ func TestMasterProfile(t *testing.T) {
if !mp.IsVirtualMachineScaleSets() {
t.Fatalf("unexpectedly detected MasterProfile.AvailabilitySets != VirtualMachineScaleSets after unmarshal")
}
+
+ if !mp.HasAvailabilityZones() {
+ t.Fatalf("unexpectedly detected MasterProfile.AvailabilityZones, HasAvailabilityZones returned false after unmarshal")
+ }
}
func TestAgentPoolProfile(t *testing.T) {
// With osType not specified
@@ -248,4 +252,35 @@ func TestContainerServiceProperties(t *testing.T) {
if !prop.HasAvailabilityZones() {
t.Fatalf("unexpectedly detected ContainerServiceProperties HasAvailabilityZones returns false after unmarshal")
}
+
+ // master profile with availability zones
+ ContainerServicePropertiesText = `{"orchestratorProfile": {"orchestratorType": "Kubernetes","orchestratorRelease": "1.12"}, "masterProfile":{"count": 4, "vmSize": "Standard_D2_v2", "availabilityProfile": "VirtualMachineScaleSets", "storageProfile": "ManagedDisks", "diskSizesGB": [750, 250, 600, 1000], "availabilityZones": ["1","2"] }, "agentPoolProfiles":[{ "name": "linuxpool1", "osType" : "Linux", "count": 1, "vmSize": "Standard_D2_v2",
+ "availabilityProfile": "VirtualMachineScaleSets"}]}`
+ prop = &Properties{}
+ if e := json.Unmarshal([]byte(ContainerServicePropertiesText), prop); e != nil {
+ t.Fatalf("unexpectedly detected unmarshal failure for ContainerServiceProperties, %+v", e)
+ }
+
+ if !prop.HasAvailabilityZones() {
+ t.Fatalf("unexpectedly detected ContainerServiceProperties HasAvailabilityZones returns false after unmarshal")
+ }
+
+ if prop.IsClusterAllAvailabilityZones() {
+ t.Fatalf("unexpectedly detected ContainerServiceProperties IsClusterAllAvailabilityZones returns true after unmarshal")
+ }
+ // master profile and agent profile with availability zones
+ ContainerServicePropertiesText = `{"orchestratorProfile": {"orchestratorType": "Kubernetes","orchestratorRelease": "1.12"}, "masterProfile":{"count": 4, "vmSize": "Standard_D2_v2", "availabilityProfile": "VirtualMachineScaleSets", "storageProfile": "ManagedDisks", "diskSizesGB": [750, 250, 600, 1000], "availabilityZones": ["1","2"] }, "agentPoolProfiles":[{ "name": "linuxpool1", "osType" : "Linux", "count": 1, "vmSize": "Standard_D2_v2",
+ "availabilityProfile": "VirtualMachineScaleSets", "availabilityZones": ["1","2"] }]}`
+ prop = &Properties{}
+ if e := json.Unmarshal([]byte(ContainerServicePropertiesText), prop); e != nil {
+ t.Fatalf("unexpectedly detected unmarshal failure for ContainerServiceProperties, %+v", e)
+ }
+
+ if !prop.HasAvailabilityZones() {
+ t.Fatalf("unexpectedly detected ContainerServiceProperties HasAvailabilityZones returns false after unmarshal")
+ }
+
+ if !prop.IsClusterAllAvailabilityZones() {
+ t.Fatalf("unexpectedly detected ContainerServiceProperties IsClusterAllAvailabilityZones returns false after unmarshal")
+ }
}
diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go
index 868fbcc2a0..3fb7b9b9f9 100644
--- a/pkg/api/vlabs/validate.go
+++ b/pkg/api/vlabs/validate.go
@@ -113,6 +113,9 @@ func (a *Properties) Validate(isUpdate bool) error {
if e := a.validateAgentPoolProfiles(isUpdate); e != nil {
return e
}
+ if e := a.validateZones(); e != nil {
+ return e
+ }
if e := a.validateLinuxProfile(); e != nil {
return e
}
@@ -364,11 +367,13 @@ func (a *Properties) validateMasterProfile() error {
if e != nil {
return e
}
- if !a.IsAllVirtualMachineScaleSets() {
+ if !a.IsClusterAllVirtualMachineScaleSets() {
return errors.New("VirtualMachineScaleSets for master profile must be used together with virtualMachineScaleSets for agent profiles. Set \"availabilityProfile\" to \"VirtualMachineScaleSets\" for agent profiles")
}
}
-
+ if m.SinglePlacementGroup != nil && m.AvailabilityProfile == AvailabilitySet {
+ return errors.New("singlePlacementGroup is only supported with VirtualMachineScaleSets")
+ }
return common.ValidateDNSPrefix(m.DNSPrefix)
}
@@ -433,18 +438,6 @@ func (a *Properties) validateAgentPoolProfiles(isUpdate bool) error {
return errors.New("mixed mode availability profiles are not allowed. Please set either VirtualMachineScaleSets or AvailabilitySet in availabilityProfile for all agent pools")
}
- if a.AgentPoolProfiles[i].AvailabilityProfile == AvailabilitySet {
- if a.AgentPoolProfiles[i].HasAvailabilityZones() {
- return errors.New("Availability Zones are not supported with an AvailabilitySet. Please either remove availabilityProfile or set availabilityProfile to VirtualMachineScaleSets")
- }
- }
-
- if a.AgentPoolProfiles[i].HasAvailabilityZones() {
- if a.AgentPoolProfiles[i].Count < len(a.AgentPoolProfiles[i].AvailabilityZones)*2 {
- return errors.New("the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more")
- }
- }
-
if a.AgentPoolProfiles[i].SinglePlacementGroup != nil && a.AgentPoolProfiles[i].AvailabilityProfile == AvailabilitySet {
return errors.New("singlePlacementGroup is only supported with VirtualMachineScaleSets")
}
@@ -470,6 +463,35 @@ func (a *Properties) validateAgentPoolProfiles(isUpdate bool) error {
return nil
}
+func (a *Properties) validateZones() error {
+ if a.OrchestratorProfile.OrchestratorType == Kubernetes {
+ // all zones or no zones should be defined for the cluster
+ if a.HasAvailabilityZones() {
+ if a.IsClusterAllAvailabilityZones() {
+ // master profile
+ if a.MasterProfile.AvailabilityProfile != VirtualMachineScaleSets {
+ return errors.New("Availability Zones are not supported with an AvailabilitySet. Please set availabilityProfile to VirtualMachineScaleSets")
+ }
+ if a.MasterProfile.Count < len(a.MasterProfile.AvailabilityZones)*2 {
+ return errors.New("the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more")
+ }
+ // agent pool profiles
+ for _, agentPoolProfile := range a.AgentPoolProfiles {
+ if agentPoolProfile.AvailabilityProfile == AvailabilitySet {
+ return errors.New("Availability Zones are not supported with an AvailabilitySet. Please either remove availabilityProfile or set availabilityProfile to VirtualMachineScaleSets")
+ }
+ if agentPoolProfile.Count < len(agentPoolProfile.AvailabilityZones)*2 {
+ return errors.New("the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more")
+ }
+ }
+ } else {
+ return errors.New("Availability Zones need to be defined for master profile and all agent pool profiles. Please set \"availabilityZones\" for all profiles")
+ }
+ }
+ }
+ return nil
+}
+
func (a *Properties) validateLinuxProfile() error {
if e := validate.Var(a.LinuxProfile.SSH.PublicKeys[0].KeyData, "required"); e != nil {
return errors.New("KeyData in LinuxProfile.SSH.PublicKeys cannot be empty string")
diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go
index a33aff3c9b..e9e505b808 100644
--- a/pkg/api/vlabs/validate_test.go
+++ b/pkg/api/vlabs/validate_test.go
@@ -1647,12 +1647,73 @@ func TestProperties_ValidateZones(t *testing.T) {
tests := []struct {
name string
orchestratorVersion string
+ masterProfile *MasterProfile
agentProfiles []*AgentPoolProfile
expectedErr string
}{
+ {
+ name: "Master profile with zones version",
+ orchestratorVersion: "1.11.3",
+ masterProfile: &MasterProfile{
+ Count: 3,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ expectedErr: "availabilityZone is only available in Kubernetes version 1.12 or greater",
+ },
+ {
+ name: "Master profile with zones vmas",
+ orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 3,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityZones: []string{"1", "2"},
+ },
+ agentProfiles: []*AgentPoolProfile{
+ {
+ Name: "agentpool",
+ VMSize: "Standard_DS2_v2",
+ Count: 4,
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ expectedErr: "Availability Zones are not supported with an AvailabilitySet. Please set availabilityProfile to VirtualMachineScaleSets",
+ },
+ {
+ name: "Master profile with zones node count",
+ orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 1,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ agentProfiles: []*AgentPoolProfile{
+ {
+ Name: "agentpool",
+ VMSize: "Standard_DS2_v2",
+ Count: 4,
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ expectedErr: "the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more",
+ },
{
name: "Agent profile with zones version",
- orchestratorVersion: "1.11.0",
+ orchestratorVersion: "1.11.3",
+ masterProfile: &MasterProfile{
+ Count: 1,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
@@ -1667,6 +1728,13 @@ func TestProperties_ValidateZones(t *testing.T) {
{
name: "Agent profile with zones node count",
orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 5,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
@@ -1679,8 +1747,15 @@ func TestProperties_ValidateZones(t *testing.T) {
expectedErr: "the node count and the number of availability zones provided can result in zone imbalance. To achieve zone balance, each zone should have at least 2 nodes or more",
},
{
- name: "Agent profile with zones vmss",
+ name: "Agent profile with zones vmas",
orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 5,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
agentProfiles: []*AgentPoolProfile{
{
Name: "agentpool",
@@ -1690,7 +1765,47 @@ func TestProperties_ValidateZones(t *testing.T) {
AvailabilityZones: []string{"1", "2"},
},
},
- expectedErr: "Availability Zones are not supported with an AvailabilitySet. Please either remove availabilityProfile or set availabilityProfile to VirtualMachineScaleSets",
+ expectedErr: "VirtualMachineScaleSets for master profile must be used together with virtualMachineScaleSets for agent profiles. Set \"availabilityProfile\" to \"VirtualMachineScaleSets\" for agent profiles",
+ },
+ {
+ name: "Master profile with zones and Agent profile without zones",
+ orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 5,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ agentProfiles: []*AgentPoolProfile{
+ {
+ Name: "agentpool",
+ VMSize: "Standard_DS2_v2",
+ Count: 4,
+ AvailabilityProfile: VirtualMachineScaleSets,
+ },
+ },
+ expectedErr: "Availability Zones need to be defined for master profile and all agent pool profiles. Please set \"availabilityZones\" for all profiles",
+ },
+ {
+ name: "Master profile without zones and Agent profile with zones",
+ orchestratorVersion: "1.12.0-beta.0",
+ masterProfile: &MasterProfile{
+ Count: 3,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: VirtualMachineScaleSets,
+ },
+ agentProfiles: []*AgentPoolProfile{
+ {
+ Name: "agentpool",
+ VMSize: "Standard_DS2_v2",
+ Count: 4,
+ AvailabilityProfile: VirtualMachineScaleSets,
+ AvailabilityZones: []string{"1", "2"},
+ },
+ },
+ expectedErr: "Availability Zones need to be defined for master profile and all agent pool profiles. Please set \"availabilityZones\" for all profiles",
},
}
@@ -1698,15 +1813,11 @@ func TestProperties_ValidateZones(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
p := getK8sDefaultProperties(true)
+ p.MasterProfile = test.masterProfile
p.AgentPoolProfiles = test.agentProfiles
p.OrchestratorProfile.OrchestratorVersion = test.orchestratorVersion
- var err error
- if test.orchestratorVersion == "1.11.0" {
- err = p.validateOrchestratorProfile(false)
- } else {
- err = p.Validate(true)
- }
+ err := p.Validate(false)
expectedMsg := test.expectedErr
if err.Error() != expectedMsg {
@@ -1717,24 +1828,57 @@ func TestProperties_ValidateZones(t *testing.T) {
}
func TestProperties_ValidateSinglePlacementGroup(t *testing.T) {
- p := getK8sDefaultProperties(true)
- p.AgentPoolProfiles = []*AgentPoolProfile{
+
+ tests := []struct {
+ name string
+ masterProfile *MasterProfile
+ agentPoolProfiles []*AgentPoolProfile
+ expectedMsg string
+ }{
+ {
+ name: "Master profile VMAS with SinglePlacementGroup",
+ masterProfile: &MasterProfile{
+ Count: 1,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ AvailabilityProfile: AvailabilitySet,
+ SinglePlacementGroup: helpers.PointerToBool(false),
+ },
+ expectedMsg: "singlePlacementGroup is only supported with VirtualMachineScaleSets",
+ },
{
- Name: "agentpool",
- VMSize: "Standard_DS2_v2",
- Count: 2,
- AvailabilityProfile: AvailabilitySet,
- SinglePlacementGroup: helpers.PointerToBool(false),
+ name: "Agent profile VMAS with SinglePlacementGroup",
+ masterProfile: &MasterProfile{
+ Count: 1,
+ DNSPrefix: "foo",
+ VMSize: "Standard_DS2_v2",
+ },
+ agentPoolProfiles: []*AgentPoolProfile{
+ {
+ Name: "agentpool",
+ VMSize: "Standard_DS2_v2",
+ Count: 4,
+ AvailabilityProfile: AvailabilitySet,
+ SinglePlacementGroup: helpers.PointerToBool(false),
+ },
+ },
+ expectedMsg: "singlePlacementGroup is only supported with VirtualMachineScaleSets",
},
}
- p.OrchestratorProfile.OrchestratorVersion = "1.12.0-beta.0"
- err := p.Validate(true)
- expectedMsg := "singlePlacementGroup is only supported with VirtualMachineScaleSets"
- if err.Error() != expectedMsg {
- t.Errorf("expected error with message : %s, but got : %s", expectedMsg, err.Error())
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ p := getK8sDefaultProperties(true)
+ p.OrchestratorProfile.OrchestratorVersion = "1.12.0-beta.0"
+ p.MasterProfile = test.masterProfile
+ p.AgentPoolProfiles = test.agentPoolProfiles
+ err := p.Validate(true)
+ if err.Error() != test.expectedMsg {
+ t.Errorf("expected error message: %s, but got: %s", test.expectedMsg, err.Error())
+ }
+ })
}
-
}
func TestProperties_ValidateVNET(t *testing.T) {
diff --git a/test/e2e/engine/template.go b/test/e2e/engine/template.go
index 2874c3fa14..cb01b2dd7b 100644
--- a/test/e2e/engine/template.go
+++ b/test/e2e/engine/template.go
@@ -247,17 +247,6 @@ func (e *Engine) HasNetworkPolicy(name string) bool {
return strings.Contains(e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.NetworkPolicy, name)
}
-// HasAllZonesAgentPools will return true if all of the agent pools have zones
-func (e *Engine) HasAllZonesAgentPools() bool {
- count := 0
- for _, ap := range e.ExpandedDefinition.Properties.AgentPoolProfiles {
- if ap.HasAvailabilityZones() {
- count++
- }
- }
- return count == len(e.ExpandedDefinition.Properties.AgentPoolProfiles)
-}
-
// Write will write the cluster definition to disk
func (e *Engine) Write() error {
json, err := helpers.JSONMarshal(e.ClusterDefinition, false)
diff --git a/test/e2e/kubernetes/kubernetes_test.go b/test/e2e/kubernetes/kubernetes_test.go
index 9cba791333..30b7005ca7 100644
--- a/test/e2e/kubernetes/kubernetes_test.go
+++ b/test/e2e/kubernetes/kubernetes_test.go
@@ -909,9 +909,29 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
})
+ Describe("with zoned master profile", func() {
+ It("should be labeled with zones for each master node", func() {
+ if eng.ExpandedDefinition.Properties.MasterProfile.HasAvailabilityZones() {
+ nodeList, err := node.Get()
+ Expect(err).NotTo(HaveOccurred())
+ for _, node := range nodeList.Nodes {
+ role := node.Metadata.Labels["kubernetes.io/role"]
+ if role == "master" {
+ By("Ensuring that we get zones for each master node")
+ zones := node.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
+ contains := strings.Contains(zones, "-")
+ Expect(contains).To(Equal(true))
+ }
+ }
+ } else {
+ Skip("Availability zones were not configured for master profile for this Cluster Definition")
+ }
+ })
+ })
+
Describe("with all zoned agent pools", func() {
It("should be labeled with zones for each node", func() {
- if eng.HasAllZonesAgentPools() {
+ if eng.ExpandedDefinition.Properties.HasAllZonesAgentPools() {
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Nodes {
@@ -929,7 +949,7 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})
It("should create pv with zone labels and node affinity", func() {
- if eng.HasAllZonesAgentPools() {
+ if eng.ExpandedDefinition.Properties.HasAllZonesAgentPools() {
By("Creating a persistent volume claim")
pvcName := "azure-managed-disk" // should be the same as in pvc-premium.yaml
pvc, err := persistentvolumeclaims.CreatePersistentVolumeClaimsFromFile(filepath.Join(WorkloadDir, "pvc-premium.yaml"), pvcName, "default")