diff --git a/docs/clusterdefinition.md b/docs/clusterdefinition.md index 13d327d5bf..e3f7153a7e 100644 --- a/docs/clusterdefinition.md +++ b/docs/clusterdefinition.md @@ -508,7 +508,7 @@ A cluster can have 0 to 12 agent pool profiles. Agent Pool Profiles are used for | ---------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | availabilityProfile | no | Supported values are `VirtualMachineScaleSets` (default, except for Kubernetes clusters before version 1.10) and `AvailabilitySet`. | | count | yes | Describes the node count | -| availabilityZones | no | To protect your cluster from datacenter-level failures, you can provide Availability Zones for all the agentPools and master profiles in your cluster. Only applies to Kubernetes clusters version 1.12+. Supported values are arrays of strings, each representing a supported availability zone in a region for your subscription. e.g. `"availabilityZones": ["1","2"]` represents zone 1 and zone 2 can be used. To get supported zones for a region in your subscription, run `az vm list-skus --location centralus --query "[?name=='Standard_DS2_v2'].[locationInfo, restrictions"] -o table`. You should see values like `'zones': ['2', '3', '1']` appear in the first column. If `NotAvailableForSubscription` appears in the output, then you need to create an Azure support ticket to enable zones for that region. Note: For availability zones, only standard load balancer is supported. 
([Availability zone example](../examples/e2e-tests/kubernetes/zones)). To ensure high availability, each profile must define at least two nodes per zone. e.g. An agent pool profile with `"availabilityZones": ["1","2"]` must have at least 4 nodes total with `"count": 4`. Availability Zones also requires `Standard` LoadBalancer. To use Standard LoadBalacer, set KubernetesConfig `"loadBalancerSku": "Standard"`. | +| availabilityZones | no | To protect your cluster from datacenter-level failures, you can provide Availability Zones for all the agentPools and master profiles in your cluster. Only applies to Kubernetes clusters version 1.12+. Supported values are arrays of strings, each representing a supported availability zone in a region for your subscription. e.g. `"availabilityZones": ["1","2"]` represents zone 1 and zone 2 can be used. To get supported zones for a region in your subscription, run `az vm list-skus --location centralus --query "[?name=='Standard_DS2_v2'].[locationInfo, restrictions]" -o table`. You should see values like `'zones': ['2', '3', '1']` appear in the first column. If `NotAvailableForSubscription` appears in the output, then you need to create an Azure support ticket to enable zones for that region. Note: For availability zones, only standard load balancer is supported. ([Availability zone example](../examples/e2e-tests/kubernetes/zones)). To ensure high availability, each profile must define at least two nodes per zone. e.g. An agent pool profile with `"availabilityZones": ["1","2"]` must have at least 4 nodes total with `"count": 4`. When `"availabilityZones"` is set, the `"loadBalancerSku"` will default to `Standard` as required by availability zones. | | singlePlacementGroup | no | Supported values are `true` (default) and `false`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. `true`: A VMSS with a single placement group and has a range of 0-100 VMs. 
`false`: A VMSS with multiple placement groups and has a range of 0-1,000 VMs. For more information, check out [virtual machine scale sets placement groups](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups). | | scaleSetPriority | no | Supported values are `Regular` (default) and `Low`. Only applies to clusters with availabilityProfile `VirtualMachineScaleSets`. Enables the usage of [Low-priority VMs on Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-use-low-priority). | | scaleSetEvictionPolicy | no | Supported values are `Delete` (default) and `Deallocate`. Only applies to clusters with availabilityProfile of `VirtualMachineScaleSets` and scaleSetPriority of `Low`. | diff --git a/examples/e2e-tests/kubernetes/zones/definition.json b/examples/e2e-tests/kubernetes/zones/definition.json index be5fd9655f..573058c1d3 100644 --- a/examples/e2e-tests/kubernetes/zones/definition.json +++ b/examples/e2e-tests/kubernetes/zones/definition.json @@ -3,10 +3,7 @@ "properties": { "orchestratorProfile": { "orchestratorType": "Kubernetes", - "orchestratorRelease": "1.12", - "kubernetesConfig": { - "loadBalancerSku": "Standard" - } + "orchestratorRelease": "1.12" }, "masterProfile": { "count": 5, diff --git a/pkg/acsengine/defaults.go b/pkg/acsengine/defaults.go index db52d10381..a05b695a77 100644 --- a/pkg/acsengine/defaults.go +++ b/pkg/acsengine/defaults.go @@ -429,11 +429,11 @@ func setOrchestratorDefaults(cs *api.ContainerService, isUpdate bool) { a.OrchestratorProfile.KubernetesConfig.UseInstanceMetadata = helpers.PointerToBool(api.DefaultUseInstanceMetadata) } - if a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "" { + if !a.HasAvailabilityZones() && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "" { a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = api.DefaultLoadBalancerSku } - if 
common.IsKubernetesVersionGe(a.OrchestratorProfile.OrchestratorVersion, "1.11.0") && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "Standard" { + if common.IsKubernetesVersionGe(a.OrchestratorProfile.OrchestratorVersion, "1.11.0") && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "Standard" && a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB == nil { a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB) } @@ -593,12 +593,13 @@ func setMasterProfileDefaults(a *api.Properties, isUpgrade bool) { // setVMSSDefaultsForMasters func setVMSSDefaultsForMasters(a *api.Properties) { - if a.MasterProfile.Count > 100 { - a.MasterProfile.SinglePlacementGroup = helpers.PointerToBool(false) - } if a.MasterProfile.SinglePlacementGroup == nil { a.MasterProfile.SinglePlacementGroup = helpers.PointerToBool(api.DefaultSinglePlacementGroup) } + if a.MasterProfile.HasAvailabilityZones() && (a.OrchestratorProfile.KubernetesConfig != nil && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "") { + a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard" + a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB) + } } // setVMSSDefaultsForAgents @@ -611,8 +612,9 @@ func setVMSSDefaultsForAgents(a *api.Properties) { if profile.SinglePlacementGroup == nil { profile.SinglePlacementGroup = helpers.PointerToBool(api.DefaultSinglePlacementGroup) } - if profile.SinglePlacementGroup == helpers.PointerToBool(false) { - profile.StorageProfile = api.ManagedDisks + if profile.HasAvailabilityZones() && (a.OrchestratorProfile.KubernetesConfig != nil && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == "") { + a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard" + a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB = 
helpers.PointerToBool(api.DefaultExcludeMasterFromStandardLB) } } diff --git a/pkg/acsengine/defaults_test.go b/pkg/acsengine/defaults_test.go index 9bdf3c57ac..0824b624c7 100644 --- a/pkg/acsengine/defaults_test.go +++ b/pkg/acsengine/defaults_test.go @@ -621,6 +621,16 @@ func TestMasterProfileDefaults(t *testing.T) { properties.MasterProfile.FirstConsecutiveStaticIP, "10.239.0.4") } + // this validates default configurations for LoadBalancerSku and ExcludeMasterFromStandardLB + mockCS = getMockBaseContainerService("1.11.6") + properties = mockCS.Properties + properties.OrchestratorProfile.OrchestratorType = "Kubernetes" + properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard" + setPropertiesDefaults(&mockCS, false, false) + if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB { + t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t", + *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB) + } } func TestAgentPoolProfile(t *testing.T) { @@ -736,7 +746,6 @@ func TestSetVMSSDefaultsAndZones(t *testing.T) { properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.MasterProfile.AvailabilityProfile = api.VirtualMachineScaleSets properties.MasterProfile.AvailabilityZones = []string{"1", "2"} - properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard" setPropertiesDefaults(&mockCS, false, false) if *properties.MasterProfile.SinglePlacementGroup != api.DefaultSinglePlacementGroup { t.Fatalf("MasterProfile.SinglePlacementGroup default did not have the expected configuration, got %t, expected %t", @@ -746,6 +755,10 @@ func TestSetVMSSDefaultsAndZones(t *testing.T) { t.Fatalf("MasterProfile.HasAvailabilityZones did not have the expected return, got %t, expected %t", 
properties.MasterProfile.HasAvailabilityZones(), true) } + if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" { + t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s", + properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard") + } if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB { t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t", *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB) @@ -770,7 +783,6 @@ func TestSetVMSSDefaultsAndZones(t *testing.T) { properties.OrchestratorProfile.OrchestratorType = "Kubernetes" properties.AgentPoolProfiles[0].Count = 4 properties.AgentPoolProfiles[0].AvailabilityZones = []string{"1", "2"} - properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku = "Standard" setPropertiesDefaults(&mockCS, false, false) if !properties.AgentPoolProfiles[0].IsVirtualMachineScaleSets() { t.Fatalf("AgentPoolProfile[0].AvailabilityProfile did not have the expected configuration, got %s, expected %s", @@ -784,6 +796,10 @@ func TestSetVMSSDefaultsAndZones(t *testing.T) { t.Fatalf("AgentPoolProfile[0].SinglePlacementGroup default did not have the expected configuration, got %t, expected %t", *properties.AgentPoolProfiles[0].SinglePlacementGroup, api.DefaultSinglePlacementGroup) } + if properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" { + t.Fatalf("OrchestratorProfile.KubernetesConfig.LoadBalancerSku did not have the expected configuration, got %s, expected %s", + properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku, "Standard") + } if *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB != api.DefaultExcludeMasterFromStandardLB { 
t.Fatalf("OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB did not have the expected configuration, got %t, expected %t", *properties.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB, api.DefaultExcludeMasterFromStandardLB) diff --git a/pkg/api/types.go b/pkg/api/types.go index c59a06e305..8afeab4744 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -719,13 +719,26 @@ func (p *Properties) HasVMSSAgentPool() bool { // HasZonesForAllAgentPools returns true if all of the agent pools have zones func (p *Properties) HasZonesForAllAgentPools() bool { - count := 0 for _, ap := range p.AgentPoolProfiles { - if ap.HasAvailabilityZones() { - count++ + if !ap.HasAvailabilityZones() { + return false + } + } + return true +} + +// HasAvailabilityZones returns true if the cluster contains a profile with zones +func (p *Properties) HasAvailabilityZones() bool { + hasZones := p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones() + if !hasZones && p.AgentPoolProfiles != nil { + for _, agentPoolProfile := range p.AgentPoolProfiles { + if agentPoolProfile.HasAvailabilityZones() { + hasZones = true + break + } } } - return count == len(p.AgentPoolProfiles) + return hasZones } // IsCustomVNET returns true if the customer brought their own VNET diff --git a/pkg/api/vlabs/types.go b/pkg/api/vlabs/types.go index d52b5f2a08..a5c576d5c9 100644 --- a/pkg/api/vlabs/types.go +++ b/pkg/api/vlabs/types.go @@ -507,7 +507,7 @@ func (p *Properties) HasWindows() bool { return false } -// HasAvailabilityZones returns true if the cluster contains pools with zones +// HasAvailabilityZones returns true if the cluster contains any profile with zones func (p *Properties) HasAvailabilityZones() bool { hasZones := p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones() if !hasZones && p.AgentPoolProfiles != nil { @@ -566,18 +566,19 @@ func (m *MasterProfile) HasAvailabilityZones() bool { return m.AvailabilityZones != nil && len(m.AvailabilityZones) > 
0 } -// IsClusterAllAvailabilityZones returns true if the cluster contains AZs for all agents and masters profiles -func (p *Properties) IsClusterAllAvailabilityZones() bool { - isAll := p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones() - if isAll && p.AgentPoolProfiles != nil { - for _, agentPoolProfile := range p.AgentPoolProfiles { - if !agentPoolProfile.HasAvailabilityZones() { - isAll = false - break - } +// HasZonesForAllAgentPools returns true if all of the agent pools have zones +func (p *Properties) HasZonesForAllAgentPools() bool { + for _, ap := range p.AgentPoolProfiles { + if !ap.HasAvailabilityZones() { + return false } } - return isAll + return true +} + +// IsClusterAllAvailabilityZones returns true if the cluster contains AZs for all agents and masters profiles +func (p *Properties) IsClusterAllAvailabilityZones() bool { + return (p.MasterProfile != nil && p.MasterProfile.HasAvailabilityZones()) && p.HasZonesForAllAgentPools() } // IsClusterAllVirtualMachineScaleSets returns true if the cluster contains only Virtual Machine Scale Sets diff --git a/pkg/api/vlabs/validate.go b/pkg/api/vlabs/validate.go index 7898714ae6..d8fcbd602f 100644 --- a/pkg/api/vlabs/validate.go +++ b/pkg/api/vlabs/validate.go @@ -277,6 +277,9 @@ func (a *Properties) validateOrchestratorProfile(isUpdate bool) error { return errors.Errorf("loadBalancerSku is only available in Kubernetes version %s or greater; unable to validate for Kubernetes version %s", minVersion.String(), o.OrchestratorVersion) } + if helpers.IsFalseBoolPointer(a.OrchestratorProfile.KubernetesConfig.ExcludeMasterFromStandardLB) { + return errors.Errorf("standard loadBalancerSku should exclude master nodes. Please set KubernetesConfig \"ExcludeMasterFromStandardLB\" to \"true\"") + } } } case OpenShift: @@ -484,7 +487,7 @@ func (a *Properties) validateZones() error { return errors.New("the node count and the number of availability zones provided can result in zone imbalance. 
To achieve zone balance, each zone should have at least 2 nodes or more") } } - if a.OrchestratorProfile.KubernetesConfig == nil || a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" { + if a.OrchestratorProfile.KubernetesConfig != nil && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "" && a.OrchestratorProfile.KubernetesConfig.LoadBalancerSku != "Standard" { return errors.New("Availability Zones requires Standard LoadBalancer. Please set KubernetesConfig \"LoadBalancerSku\" to \"Standard\"") } } else { diff --git a/pkg/api/vlabs/validate_test.go b/pkg/api/vlabs/validate_test.go index c66bf8e739..8a6f9d0eea 100644 --- a/pkg/api/vlabs/validate_test.go +++ b/pkg/api/vlabs/validate_test.go @@ -1645,11 +1645,13 @@ func TestProperties_ValidateAddon(t *testing.T) { } func TestProperties_ValidateZones(t *testing.T) { tests := []struct { - name string - orchestratorRelease string - masterProfile *MasterProfile - agentProfiles []*AgentPoolProfile - expectedErr string + name string + orchestratorRelease string + loadBalancerSku string + excludeMasterFromStandardLB bool + masterProfile *MasterProfile + agentProfiles []*AgentPoolProfile + expectedErr string }{ { name: "Master profile with zones version", @@ -1810,6 +1812,7 @@ func TestProperties_ValidateZones(t *testing.T) { { name: "all zones and basic loadbalancer", orchestratorRelease: "1.12", + loadBalancerSku: "Basic", masterProfile: &MasterProfile{ Count: 5, DNSPrefix: "foo", @@ -1828,6 +1831,29 @@ func TestProperties_ValidateZones(t *testing.T) { }, expectedErr: "Availability Zones requires Standard LoadBalancer. 
Please set KubernetesConfig \"LoadBalancerSku\" to \"Standard\"", }, + { + name: "all zones with standard loadbalancer and false excludeMasterFromStandardLB", + orchestratorRelease: "1.12", + loadBalancerSku: "Standard", + excludeMasterFromStandardLB: false, + masterProfile: &MasterProfile{ + Count: 5, + DNSPrefix: "foo", + VMSize: "Standard_DS2_v2", + AvailabilityProfile: VirtualMachineScaleSets, + AvailabilityZones: []string{"1", "2"}, + }, + agentProfiles: []*AgentPoolProfile{ + { + Name: "agentpool", + VMSize: "Standard_DS2_v2", + Count: 4, + AvailabilityProfile: VirtualMachineScaleSets, + AvailabilityZones: []string{"1", "2"}, + }, + }, + expectedErr: "standard loadBalancerSku should exclude master nodes. Please set KubernetesConfig \"ExcludeMasterFromStandardLB\" to \"true\"", + }, } for _, test := range tests { @@ -1837,6 +1863,10 @@ func TestProperties_ValidateZones(t *testing.T) { p.MasterProfile = test.masterProfile p.AgentPoolProfiles = test.agentProfiles p.OrchestratorProfile.OrchestratorRelease = test.orchestratorRelease + p.OrchestratorProfile.KubernetesConfig = &KubernetesConfig{ + LoadBalancerSku: test.loadBalancerSku, + ExcludeMasterFromStandardLB: helpers.PointerToBool(test.excludeMasterFromStandardLB), + } err := p.Validate(false)