This repository has been archived by the owner on Jan 11, 2023. It is now read-only.

1.12 uses coredns #3987

Merged
9 commits merged on Oct 11, 2018
183 changes: 183 additions & 0 deletions parts/k8s/addons/coredns.yaml
@@ -0,0 +1,183 @@
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes <kubernetesKubeletClusterDomain> in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: <kubernetesCoreDNSSpec>
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: <kubeDNSServiceIP>
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
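The Deployment above wires CoreDNS health into a liveness probe on port 8080 (`/health`) and exposes Prometheus metrics on port 9153, matching the `health` and `prometheus :9153` plugins in the Corefile. Below is a minimal Go sketch of checking those two endpoints by hand; the pod address is a placeholder assumption (for example reached via `kubectl port-forward`), not anything defined in this PR.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// probe performs the same kind of request the kubelet liveness probe
// (GET /health on :8080) and a Prometheus scrape (GET /metrics on :9153)
// would make, and prints the HTTP status.
func probe(url string) error {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 256))
	fmt.Printf("%s -> %s (%d bytes sampled)\n", url, resp.Status, len(body))
	return nil
}

func main() {
	// Placeholder: substitute a real CoreDNS pod IP, or 127.0.0.1 when
	// port-forwarding to the coredns pod.
	podIP := "10.244.0.10"
	for _, url := range []string{
		fmt.Sprintf("http://%s:8080/health", podIP),
		fmt.Sprintf("http://%s:9153/metrics", podIP),
	} {
		if err := probe(url); err != nil {
			fmt.Println("probe failed:", err)
		}
	}
}
```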
35 changes: 35 additions & 0 deletions parts/k8s/addons/dns-autoscaler.yaml
@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dns-autoscaler
  namespace: kube-system
  labels:
    k8s-app: dns-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: dns-autoscaler
    spec:
      containers:
      - name: autoscaler
        image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.1
        resources:
          requests:
            cpu: "20m"
            memory: "10Mi"
        command:
          - /cluster-proportional-autoscaler
          - --namespace=kube-system
          - --configmap=dns-autoscaler
          - --target=Deployment/coredns
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
          # If using small nodes, "nodesPerReplica" should dominate.
          - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
          - --logtostderr=true
          - --v=2
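The two comments above describe cluster-proportional-autoscaler's `linear` mode: it scales the target by whichever of cores/coresPerReplica and nodes/nodesPerReplica demands more replicas. A small Go sketch of that formula as commonly documented for the autoscaler; the rounding and the `min` clamp here are my reading, not behavior verified in this PR.

```go
package main

import (
	"fmt"
	"math"
)

// linearReplicas approximates the autoscaler's "linear" mode: take the
// larger of cores/coresPerReplica and nodes/nodesPerReplica, round up,
// and never drop below min.
func linearReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64, min int) int {
	byCores := math.Ceil(float64(cores) / coresPerReplica)
	byNodes := math.Ceil(float64(nodes) / nodesPerReplica)
	replicas := int(math.Max(byCores, byNodes))
	if replicas < min {
		replicas = min
	}
	return replicas
}

func main() {
	// Parameters from the manifest above: coresPerReplica=256, nodesPerReplica=16, min=1.
	fmt.Println(linearReplicas(8, 3, 256, 16, 1))     // tiny cluster: 1 replica
	fmt.Println(linearReplicas(320, 40, 256, 16, 1))  // many small nodes: nodesPerReplica dominates -> 3
	fmt.Println(linearReplicas(2048, 16, 256, 16, 1)) // few large nodes: coresPerReplica dominates -> 8
}
```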
4 changes: 4 additions & 0 deletions parts/k8s/kubernetesmastercustomdata.yml
@@ -253,7 +253,11 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g" "/etc/kubernetes/manifests/kube-controller-manager.yaml"
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g" "/etc/kubernetes/manifests/kube-scheduler.yaml"
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g; s|<kubeClusterCidr>|{{WrapAsParameter "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/kube-proxy-daemonset.yaml"
{{if IsKubernetesVersionGe "1.12.0"}}
sed -i "s|<kubernetesCoreDNSSpec>|{{WrapAsParameter "kubernetesCoreDNSSpec"}}|g; s|<kubernetesKubeletClusterDomain>|{{WrapAsParameter "kubernetesKubeletClusterDomain"}}|g; s|<kubeDNSServiceIP>|{{WrapAsParameter "kubeDNSServiceIP"}}|g" "/etc/kubernetes/addons/coredns.yaml"
{{else}}
sed -i "s|<kubernetesKubeDNSSpec>|{{WrapAsParameter "kubernetesKubeDNSSpec"}}|g; s|<kubernetesDNSMasqSpec>|{{WrapAsParameter "kubernetesDNSMasqSpec"}}|g; s|<kubernetesExecHealthzSpec>|{{WrapAsParameter "kubernetesExecHealthzSpec"}}|g; s|<kubernetesDNSSidecarSpec>|{{WrapAsParameter "kubernetesDNSSidecarSpec"}}|g; s|<kubernetesKubeletClusterDomain>|{{WrapAsParameter "kubernetesKubeletClusterDomain"}}|g; s|<kubeDNSServiceIP>|{{WrapAsParameter "kubeDNSServiceIP"}}|g" "/etc/kubernetes/addons/kube-dns-deployment.yaml"
{{end}}
sed -i "s|<kubernetesHeapsterSpec>|{{WrapAsParameter "kubernetesHeapsterSpec"}}|g; s|<kubernetesAddonResizerSpec>|{{WrapAsParameter "kubernetesAddonResizerSpec"}}|g" "/etc/kubernetes/addons/kube-heapster-deployment.yaml"

{{if .OrchestratorProfile.KubernetesConfig.IsDashboardEnabled}}
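These sed lines (and the identical ones in the VMSS variant below) are where the `<...>` placeholders in coredns.yaml get resolved on the master. A minimal Go sketch of the same substitution; the cluster domain and service IP values are illustrative assumptions, while the image tag matches the `addValue` call added in params_k8s.go later in this PR.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The three placeholder-bearing lines from coredns.yaml, taken out of
	// context for illustration.
	fragment := `kubernetes <kubernetesKubeletClusterDomain> in-addr.arpa ip6.arpa {
image: <kubernetesCoreDNSSpec>
clusterIP: <kubeDNSServiceIP>`

	// Example values only; at provisioning time these come from the
	// kubernetesKubeletClusterDomain, kubernetesCoreDNSSpec and
	// kubeDNSServiceIP ARM parameters.
	r := strings.NewReplacer(
		"<kubernetesKubeletClusterDomain>", "cluster.local",
		"<kubernetesCoreDNSSpec>", "coredns/coredns:1.2.2",
		"<kubeDNSServiceIP>", "10.0.0.10",
	)
	fmt.Println(r.Replace(fragment))
}
```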
4 changes: 4 additions & 0 deletions parts/k8s/kubernetesmastercustomdatavmss.yml
@@ -255,7 +255,11 @@ MASTER_ARTIFACTS_CONFIG_PLACEHOLDER
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g" "/etc/kubernetes/manifests/kube-controller-manager.yaml"
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g" "/etc/kubernetes/manifests/kube-scheduler.yaml"
sed -i "s|<kubernetesHyperkubeSpec>|{{WrapAsParameter "kubernetesHyperkubeSpec"}}|g; s|<kubeClusterCidr>|{{WrapAsParameter "kubeClusterCidr"}}|g" "/etc/kubernetes/addons/kube-proxy-daemonset.yaml"
{{if IsKubernetesVersionGe "1.12.0"}}
sed -i "s|<kubernetesCoreDNSSpec>|{{WrapAsParameter "kubernetesCoreDNSSpec"}}|g; s|<kubernetesKubeletClusterDomain>|{{WrapAsParameter "kubernetesKubeletClusterDomain"}}|g; s|<kubeDNSServiceIP>|{{WrapAsParameter "kubeDNSServiceIP"}}|g" "/etc/kubernetes/addons/coredns.yaml"
{{else}}
sed -i "s|<kubernetesKubeDNSSpec>|{{WrapAsParameter "kubernetesKubeDNSSpec"}}|g; s|<kubernetesDNSMasqSpec>|{{WrapAsParameter "kubernetesDNSMasqSpec"}}|g; s|<kubernetesExecHealthzSpec>|{{WrapAsParameter "kubernetesExecHealthzSpec"}}|g; s|<kubernetesDNSSidecarSpec>|{{WrapAsParameter "kubernetesDNSSidecarSpec"}}|g; s|<kubernetesKubeletClusterDomain>|{{WrapAsParameter "kubernetesKubeletClusterDomain"}}|g; s|<kubeDNSServiceIP>|{{WrapAsParameter "kubeDNSServiceIP"}}|g" "/etc/kubernetes/addons/kube-dns-deployment.yaml"
{{end}}
sed -i "s|<kubernetesHeapsterSpec>|{{WrapAsParameter "kubernetesHeapsterSpec"}}|g; s|<kubernetesAddonResizerSpec>|{{WrapAsParameter "kubernetesAddonResizerSpec"}}|g" "/etc/kubernetes/addons/kube-heapster-deployment.yaml"

{{if .OrchestratorProfile.KubernetesConfig.IsDashboardEnabled}}
6 changes: 6 additions & 0 deletions parts/k8s/kubernetesparams.t
@@ -631,6 +631,12 @@
    },
    "type": "string"
  },
  "kubernetesCoreDNSSpec": {
    "metadata": {
      "description": "The container spec for coredns"
    },
    "type": "string"
  },
  "kubernetesDNSMasqSpec": {
    "metadata": {
      "description": "The container spec for kube-dnsmasq-amd64."
20 changes: 19 additions & 1 deletion pkg/acsengine/artifacts.go
@@ -35,10 +35,28 @@ func kubernetesAddonSettingsInit(profile *api.Properties) []kubernetesAddonSetti
			kubernetesFeatureSetting{
				"kubernetesmasteraddons-kube-dns-deployment.yaml",
				"kube-dns-deployment.yaml",
				true,
				!common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.12.0"),
			},
			profile.OrchestratorProfile.KubernetesConfig.GetAddonScript(DefaultKubeDNSDeploymentAddonName),
		},
		{
			kubernetesFeatureSetting{
				"coredns.yaml",
				"coredns.yaml",
				common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.12.0"),
			},
			profile.OrchestratorProfile.KubernetesConfig.GetAddonScript(DefaultCoreDNSAddonName),
		},
		{
			kubernetesFeatureSetting{
				"dns-autoscaler.yaml",
				"dns-autoscaler.yaml",
				// TODO enable this when it has been smoke tested
				//common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.12.0"),
				false,
			},
			profile.OrchestratorProfile.KubernetesConfig.GetAddonScript(DefaultDNSAutoscalerAddonName),
		},
		{
			kubernetesFeatureSetting{
				"kubernetesmasteraddons-kube-proxy-daemonset.yaml",
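Reduced to a standalone sketch, the gating pattern in this hunk looks roughly like the following: each addon carries a source template, a destination file name, and an enabled flag, and the 1.12 gate flips kube-dns off and coredns on. The struct and helper below are illustrative stand-ins for `kubernetesFeatureSetting` and `common.IsKubernetesVersionGe`, not the real acs-engine types.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// isVersionGe is a crude stand-in for common.IsKubernetesVersionGe,
// comparing dotted numeric versions such as "1.12.0".
func isVersionGe(actual, minimum string) bool {
	a := strings.Split(actual, ".")
	m := strings.Split(minimum, ".")
	for i := 0; i < len(a) && i < len(m); i++ {
		ai, _ := strconv.Atoi(a[i])
		mi, _ := strconv.Atoi(m[i])
		if ai != mi {
			return ai > mi
		}
	}
	return len(a) >= len(m)
}

// addonSetting loosely mirrors the kubernetesFeatureSetting entries above:
// a source template, a destination file name, and an enabled flag.
type addonSetting struct {
	sourceFile      string
	destinationFile string
	isEnabled       bool
}

func main() {
	version := "1.12.1" // assumed example orchestrator version
	settings := []addonSetting{
		{"kubernetesmasteraddons-kube-dns-deployment.yaml", "kube-dns-deployment.yaml", !isVersionGe(version, "1.12.0")},
		{"coredns.yaml", "coredns.yaml", isVersionGe(version, "1.12.0")},
		{"dns-autoscaler.yaml", "dns-autoscaler.yaml", false}, // kept off until smoke tested, as in the diff
	}
	for _, s := range settings {
		if s.isEnabled {
			fmt.Printf("render %s -> /etc/kubernetes/addons/%s\n", s.sourceFile, s.destinationFile)
		}
	}
}
```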
4 changes: 4 additions & 0 deletions pkg/acsengine/const.go
@@ -88,6 +88,10 @@ const (
	DefaultKubeHeapsterDeploymentAddonName = "kube-heapster-deployment"
	// DefaultKubeDNSDeploymentAddonName is the name of the kube-dns-deployment addon
	DefaultKubeDNSDeploymentAddonName = "kube-dns-deployment"
	// DefaultCoreDNSAddonName is the name of the coredns addon
	DefaultCoreDNSAddonName = "coredns"
	// DefaultDNSAutoscalerAddonName is the name of the dns-autoscaler addon
	DefaultDNSAutoscalerAddonName = "dns-autoscaler"
	// DefaultKubeProxyAddonName is the name of the kube-proxy config addon
	DefaultKubeProxyAddonName = "kube-proxy-daemonset"
	// DefaultAzureStorageClassesAddonName is the name of the azure storage classes addon
1 change: 1 addition & 0 deletions pkg/acsengine/params_k8s.go
@@ -246,6 +246,7 @@ func assignKubernetesParameters(properties *api.Properties, parametersMap params
			}
		}
	}
	addValue(parametersMap, "kubernetesCoreDNSSpec", "coredns/coredns:1.2.2")
Review thread on this line:

Contributor: Does this belong in the K8sConfigComponentVersion Map :P ?

Member (Author): We'll do it later to make your rebase easier :)

addValue(parametersMap, "kubernetesKubeDNSSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion]["dns"])
addValue(parametersMap, "kubernetesPodInfraContainerSpec", cloudSpecConfig.KubernetesSpecConfig.KubernetesImageBase+KubeConfigs[k8sVersion]["pause"])
addValue(parametersMap, "cloudproviderConfig", api.CloudProviderConfig{
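For completeness, a hedged sketch of what the reviewer's suggestion might look like, i.e. resolving the CoreDNS image from a per-version component map instead of pinning it at the call site. The map shape and the "coredns" key are assumptions for illustration, not code in this PR.

```go
package main

import "fmt"

func main() {
	// Hypothetical per-version component map; the real acs-engine map has
	// a different shape and key set, and "coredns" as a key is an assumption.
	kubeConfigs := map[string]map[string]string{
		"1.12": {
			"coredns": "coredns/coredns:1.2.2", // same value as the addValue call above
		},
	}

	k8sVersion := "1.12"
	// Resolve the image from the map instead of hard-coding it at the
	// parameter-assignment call site.
	fmt.Println(kubeConfigs[k8sVersion]["coredns"])
}
```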
33 changes: 28 additions & 5 deletions test/e2e/kubernetes/kubernetes_test.go
@@ -70,6 +70,11 @@ var _ = BeforeSuite(func() {
		masterSSHPort = "22"
	}
	masterSSHPrivateKeyFilepath = cfg.GetSSHKeyPath()
	// TODO
	// If no user-configurable stability iteration value is passed in, run stability tests once
	/*if cfg.StabilityIterations == 0 {
		cfg.StabilityIterations = 1
	}*/
})

var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", func() {
@@ -115,13 +120,18 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu

It("should have stable internal container networking", func() {
name := fmt.Sprintf("alpine-%s", cfg.Name)
command := fmt.Sprintf("nc -vz kubernetes 443")
var command string
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.12.0") {
command = fmt.Sprintf("nc -vz kubernetes 443 && nc -vz kubernetes.default.svc 443 && nc -vz kubernetes.default.svc.cluster.local 443")
} else {
command = fmt.Sprintf("nc -vz kubernetes 443")
}
successes, err := pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
})

It("should have functional DNS", func() {
It("should be able to launch a long-running container networking DNS liveness pod", func() {
if !eng.HasNetworkPolicy("calico") {
var err error
var p *pod.Pod
@@ -138,7 +148,9 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
				Expect(err).NotTo(HaveOccurred())
				Expect(running).To(Equal(true))
			}
		})

		It("should have functional host OS DNS", func() {
			kubeConfig, err := GetConfig()
			Expect(err).NotTo(HaveOccurred())
			master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
@@ -222,7 +234,9 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
			if err != nil {
				log.Printf("Error while querying DNS: %s\n", err)
			}
		})

		It("should have functional container networking DNS", func() {
			By("Ensuring that we have functional DNS resolution from a container")
			j, err := job.CreateJobFromFile(filepath.Join(WorkloadDir, "validate-dns.yaml"), "validate-dns", "default")
			Expect(err).NotTo(HaveOccurred())
@@ -235,16 +249,25 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
			Expect(err).NotTo(HaveOccurred())
			Expect(ready).To(Equal(true))

			By("Ensuring that we have stable DNS resolution from a container")
			By("Ensuring that we have stable external DNS resolution from a container")
			name := fmt.Sprintf("alpine-%s", cfg.Name)
			command := fmt.Sprintf("nc -vz bbc.co.uk 80 || nc -vz google.com 443 || nc -vz microsoft.com 80")
			successes, err := pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations)
			Expect(err).NotTo(HaveOccurred())
			Expect(successes).To(Equal(cfg.StabilityIterations))
		})

		It("should have kube-dns running", func() {
			running, err := pod.WaitOnReady("kube-dns", "kube-system", 3, 30*time.Second, cfg.Timeout)
		It("should have DNS pod running", func() {
			var err error
			var running bool
			if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.12.0") {
				By("Ensuring that coredns is running")
				running, err = pod.WaitOnReady("coredns", "kube-system", 3, 30*time.Second, cfg.Timeout)

			} else {
				By("Ensuring that kube-dns is running")
				running, err = pod.WaitOnReady("kube-dns", "kube-system", 3, 30*time.Second, cfg.Timeout)
			}
			Expect(err).NotTo(HaveOccurred())
			Expect(running).To(Equal(true))
		})
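The expanded internal-networking test above probes three progressively more qualified service names that CoreDNS must answer. A small Go sketch of an equivalent in-cluster lookup; it has to run inside a pod, and `cluster.local` as the cluster domain is an assumption about the default configuration, not something fixed by this PR.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// The same three names the e2e test probes with nc; the fully
	// qualified form assumes the default "cluster.local" cluster domain.
	names := []string{
		"kubernetes",
		"kubernetes.default.svc",
		"kubernetes.default.svc.cluster.local",
	}
	for _, name := range names {
		addrs, err := net.LookupHost(name)
		if err != nil {
			fmt.Printf("%s: lookup failed: %v\n", name, err)
			continue
		}
		fmt.Printf("%s -> %v\n", name, addrs)
	}
}
```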