diff --git a/Makefile b/Makefile index 3f26f42c9fa..446be822f12 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,9 @@ metrics-unit-test docker-metrics-test # VERSION is the source revision that executables and images are built from. -VERSION ?= $(shell git describe --tags --always --dirty || echo "unknown") +#VERSION ?= $(shell git describe --tags --always --dirty || echo "unknown") +# git describe is generating a wrong git tag hence using latest by default. +VERSION = "latest" # DESTDIR is where distribution output (container images) is placed. DESTDIR = . @@ -262,6 +264,17 @@ format: check-format: FORMAT_FLAGS = -l check-format: format +version: + @echo ${VERSION} + +upload-resources-to-github: + ${MAKEFILE_PATH}/scripts/upload-resources-to-github + +generate-k8s-yaml: + ${MAKEFILE_PATH}/scripts/generate-k8s-yaml + +release: generate-k8s-yaml upload-resources-to-github + # Clean temporary files and build artifacts from the project. clean: @rm -f -- $(BINS) diff --git a/charts/aws-calico/.helmignore b/charts/aws-calico/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/aws-calico/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/aws-calico/Chart.yaml b/charts/aws-calico/Chart.yaml new file mode 100644 index 00000000000..86ba682c338 --- /dev/null +++ b/charts/aws-calico/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +description: A Helm chart for installing Calico on AWS +website: https://docs.aws.amazon.com/eks/latest/userguide/calico.html +name: aws-calico +version: 0.3.4 +appVersion: 3.15.1 +icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png diff --git a/charts/aws-calico/README.md b/charts/aws-calico/README.md new file mode 100755 index 00000000000..9abbca691ca --- /dev/null +++ b/charts/aws-calico/README.md @@ -0,0 +1,66 @@ +# Calico on AWS + +This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html + +## Prerequisites + +- Kubernetes 1.11+ running on AWS + +## Installing the Chart + +First add the EKS repository to Helm: + +```shell +helm repo add eks https://aws.github.io/eks-charts +``` + +Install the Calico CRDs: + +```shell +kubectl apply -k github.com/aws/eks-charts/tree/master/stable/aws-calico/crds +``` + +To install the chart with the release name `aws-calico` and default configuration: + +```shell +$ helm install --name aws-calico --namespace kube-system eks/aws-calico +``` + +To install into an EKS cluster where the CNI is already installed, you can run: + +```shell +helm upgrade --install --recreate-pods --force aws-calico --namespace kube-system eks/aws-calico +``` + +If you receive an error similar to `Error: release aws-calico failed: "aws-calico" already exists`, simply rerun the above command. + +## Configuration + +The following table lists the configurable parameters for this chart and their default values. 
+ +| Parameter | Description | Default | +|----------------------------------------|---------------------------------------------------------|---------------------------------| +| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | +| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | +| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | +| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | +| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | +| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | +| `calico.node.logseverity` | Calico Node Log Severity | `Info` | +| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | +| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.tag` | Calico version | `v3.8.1` | +| `fullnameOverride` | Override the fullname of the chart | `calico` | +| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | +| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing 
the values for the above parameters: + +```shell +$ helm install --name aws-calico --namespace kube-system eks/aws-calico --values values.yaml +``` diff --git a/charts/aws-calico/crds/crds.yaml b/charts/aws-calico/crds/crds.yaml new file mode 100755 index 00000000000..73fe142f41f --- /dev/null +++ b/charts/aws-calico/crds/crds.yaml @@ -0,0 +1,214 @@ +# Create all the CustomResourceDefinitions needed for +# Calico policy-only mode. + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true 
+ storage: true + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + versions: + - name: v1 + served: 
true + storage: true + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: NetworkSet + plural: networksets + singular: networkset \ No newline at end of file diff --git a/charts/aws-calico/crds/kustomization.yaml b/charts/aws-calico/crds/kustomization.yaml new file mode 100644 index 00000000000..f04014ce47c --- /dev/null +++ b/charts/aws-calico/crds/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- crds.yaml \ No newline at end of file diff --git a/charts/aws-calico/templates/_helpers.tpl b/charts/aws-calico/templates/_helpers.tpl new file mode 100755 index 00000000000..0a18027c2d8 --- /dev/null +++ b/charts/aws-calico/templates/_helpers.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-calico.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-calico.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "aws-calico.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-calico.labels" -}} +helm.sh/chart: {{ include "aws-calico.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-calico.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-calico.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/aws-calico/templates/config-map.yaml b/charts/aws-calico/templates/config-map.yaml new file mode 100755 index 00000000000..9a3cfaa519a --- /dev/null +++ b/charts/aws-calico/templates/config-map.yaml @@ -0,0 +1,22 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" + labels: +{{ include "aws-calico.labels" . 
| indent 4 }} +data: + ladder: |- + { + "coresToReplicas": [], + "nodesToReplicas": + [ + [1, 1], + [10, 2], + [100, 3], + [250, 4], + [500, 5], + [1000, 6], + [1500, 7], + [2000, 8] + ] + } \ No newline at end of file diff --git a/charts/aws-calico/templates/daemon-set.yaml b/charts/aws-calico/templates/daemon-set.yaml new file mode 100755 index 00000000000..769ad5d42b9 --- /dev/null +++ b/charts/aws-calico/templates/daemon-set.yaml @@ -0,0 +1,156 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + {{- with .Values.calico.node.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + nodeSelector: + {{- toYaml .Values.calico.node.nodeSelector | nindent 8 }} + hostNetwork: true + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. 
+ - name: calico-node + image: "{{ .Values.calico.node.image }}:{{ .Values.calico.tag }}" + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Use eni not cali for interface prefix + - name: FELIX_INTERFACEPREFIX + value: "eni" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ .Values.calico.node.logseverity }}" + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,ecs" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + - name: FELIX_TYPHAK8SSERVICENAME + value: "calico-typha" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # This will make Felix honor AWS VPC CNI's mangle table + # rules. + - name: FELIX_IPTABLESMANGLEALLOWACTION + value: Return + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + - name: FELIX_LOGSEVERITYSYS + value: "none" + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "true" + - name: FELIX_ROUTESOURCE + value: "WorkloadIPs" + - name: NO_DEFAULT_POOLS + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. 
+ - name: IP + value: "" + - name: FELIX_HEALTHENABLED + value: "true" + {{- if .Values.calico.node.extraEnv }} + {{- toYaml .Values.calico.node.extraEnv | nindent 12 }} + {{- end }} + securityContext: + privileged: true + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + periodSeconds: 10 + resources: + {{- toYaml .Values.calico.node.resources | nindent 12 }} + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + volumes: + # Used to ensure proper kmods are installed. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + tolerations: + # Make sure calico/node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists diff --git a/charts/aws-calico/templates/deployment.yaml b/charts/aws-calico/templates/deployment.yaml new file mode 100755 index 00000000000..53159ddff98 --- /dev/null +++ b/charts/aws-calico/templates/deployment.yaml @@ -0,0 +1,134 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + revisionHistoryLimit: 2 + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . 
}}-typha" + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + {{- with .Values.calico.typha.podAnnotations }} + annotations: {{- toYaml . | nindent 10 }} + {{- end }} + spec: + priorityClassName: system-cluster-critical + nodeSelector: + {{- toYaml .Values.calico.typha.nodeSelector | nindent 8 }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + {{- if .Values.calico.typha.tolerations }} + {{- toYaml .Values.calico.typha.tolerations | nindent 8 }} + {{- end }} + hostNetwork: true + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: "{{ .Values.calico.typha.image }}:{{ .Values.calico.tag }}" + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Use eni not cali for interface prefix + - name: FELIX_INTERFACEPREFIX + value: "eni" + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + - name: TYPHA_LOGSEVERITYSCREEN + value: "{{ .Values.calico.typha.logseverity }}" + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "true" + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "9093" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_MAXCONNECTIONSLOWERLIMIT + value: "1" + - name: TYPHA_HEALTHENABLED + value: "true" + # This will make Felix honor AWS VPC CNI's mangle table + # rules. 
+ - name: FELIX_IPTABLESMANGLEALLOWACTION + value: Return + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + resources: + {{- toYaml .Values.calico.typha.resources | nindent 12 }} + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + {{- with .Values.calico.typha_autoscaler.podAnnotations }} + annotations: {{- toYaml . | nindent 10 }} + {{- end }} + spec: + priorityClassName: system-cluster-critical + nodeSelector: + {{- toYaml .Values.calico.typha_autoscaler.nodeSelector | nindent 8 }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + {{- if .Values.calico.typha_autoscaler.tolerations }} + {{- toYaml .Values.calico.typha_autoscaler.tolerations | nindent 8 }} + {{- end }} + containers: + - image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}" + name: autoscaler + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler + - --target=deployment/{{ include "aws-calico.fullname" . }}-typha + - --logtostderr=true + - --v=2 + resources: + {{- toYaml .Values.calico.typha_autoscaler.resources | nindent 12 }} + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . 
}}-typha-cpha" diff --git a/charts/aws-calico/templates/pod-disruption-budget.yaml b/charts/aws-calico/templates/pod-disruption-budget.yaml new file mode 100644 index 00000000000..8635b31545f --- /dev/null +++ b/charts/aws-calico/templates/pod-disruption-budget.yaml @@ -0,0 +1,13 @@ +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" diff --git a/charts/aws-calico/templates/podsecuritypolicy.yaml b/charts/aws-calico/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000000..c946ee71427 --- /dev/null +++ b/charts/aws-calico/templates/podsecuritypolicy.yaml @@ -0,0 +1,211 @@ +{{- if .Values.podSecurityPolicy.create -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-node + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - ALL + hostNetwork: true + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + - 'hostPath' + allowedHostPaths: + - pathPrefix: "/lib/modules" + readOnly: false + - pathPrefix: "/var/run/calico" + readOnly: false + - pathPrefix: "/var/lib/calico" + readOnly: false + - pathPrefix: "/run/xtables.lock" + readOnly: false + runAsUser: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-typha + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + hostNetwork: true + hostPorts: + - max: 5473 + min: 5473 + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-node-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-node +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-typha +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-node-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . 
| indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-node-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-node + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-typha-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-node + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-typha-cpha + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/aws-calico/templates/rbac.yaml b/charts/aws-calico/templates/rbac.yaml new file mode 100755 index 00000000000..7caa7fa4048 --- /dev/null +++ b/charts/aws-calico/templates/rbac.yaml @@ -0,0 +1,214 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . | indent 4 }} +rules: + # The CNI plugin needs to get pods, nodes, namespaces, and configmaps. 
+ - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ include "aws-calico.fullname" . }}-node" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-node" + namespace: {{ .Release.Namespace }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + namespace: {{ .Release.Namespace }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: "{{ include "aws-calico.fullname" . 
}}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale"] + verbs: ["get", "update"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + namespace: "{{ .Release.Namespace }}" diff --git a/charts/aws-calico/templates/service-accounts.yaml b/charts/aws-calico/templates/service-accounts.yaml new file mode 100755 index 00000000000..21409395713 --- /dev/null +++ b/charts/aws-calico/templates/service-accounts.yaml @@ -0,0 +1,18 @@ +# Create the ServiceAccount and roles necessary for Calico. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ include "aws-calico.serviceAccountName" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . | indent 4 }} + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +{{ include "aws-calico.labels" . | indent 4 }} \ No newline at end of file diff --git a/charts/aws-calico/templates/service.yaml b/charts/aws-calico/templates/service.yaml new file mode 100755 index 00000000000..4edb632d466 --- /dev/null +++ b/charts/aws-calico/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: "{{ include "aws-calico.fullname" . }}-typha" + selector: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" diff --git a/charts/aws-calico/values.yaml b/charts/aws-calico/values.yaml new file mode 100644 index 00000000000..174a286bbf1 --- /dev/null +++ b/charts/aws-calico/values.yaml @@ -0,0 +1,57 @@ +fullnameOverride: calico + +serviceAccount: + create: true + +podSecurityPolicy: + create: false + +calico: + tag: v3.15.1 + + typha: + logseverity: Info #Debug, Info, Warning, Error, Fatal + image: quay.io/calico/typha + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "96Mi" + cpu: "100m" + tolerations: [] + nodeSelector: + beta.kubernetes.io/os: linux + podAnnotations: {} + node: + logseverity: Info #Debug, Info, Warning, Error, Fatal + image: quay.io/calico/node + resources: + requests: + memory: "32Mi" + cpu: "20m" + limits: + memory: "64Mi" + cpu: "100m" + extraEnv: [] + # - name: SOME_VAR + # value: 'some value' + nodeSelector: + beta.kubernetes.io/os: linux + podAnnotations: {} + typha_autoscaler: + resources: + requests: + memory: "16Mi" + cpu: "10m" + limits: + memory: "32Mi" + cpu: "10m" + tolerations: [] + nodeSelector: + beta.kubernetes.io/os: linux + podAnnotations: {} + +autoscaler: + tag: "1.7.1" + image: k8s.gcr.io/cluster-proportional-autoscaler-amd64 diff --git a/charts/aws-vpc-cni/.helmignore b/charts/aws-vpc-cni/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/aws-vpc-cni/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/aws-vpc-cni/Chart.yaml b/charts/aws-vpc-cni/Chart.yaml
new file mode 100644
index 00000000000..c66ea0f61cf
--- /dev/null
+++ b/charts/aws-vpc-cni/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+name: aws-vpc-cni
+version: 1.1.0
+appVersion: "v1.7.3"
+description: A Helm chart for the AWS VPC CNI
+icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
+home: https://github.com/jayanthvn/amazon-vpc-cni-k8s
+sources:
+  - https://github.com/jayanthvn/amazon-vpc-cni-k8s
+keywords:
+  - eks
+  - cni
+  - networking
+  - vpc
+maintainers:
+  - name: Jayanth Varavani
+    url: https://github.com/jayanthvn
+    email: jayanthvn@users.noreply.github.com
+engine: gotpl
diff --git a/charts/aws-vpc-cni/README.md b/charts/aws-vpc-cni/README.md
new file mode 100644
index 00000000000..a053409e9ce
--- /dev/null
+++ b/charts/aws-vpc-cni/README.md
@@ -0,0 +1,87 @@
+# AWS VPC CNI
+
+This chart installs the AWS CNI Daemonset: https://github.com/aws/amazon-vpc-cni-k8s
+
+## Prerequisites
+
+- Kubernetes 1.11+ running on AWS
+
+## Installing the Chart
+
+First add the EKS repository to Helm:
+
+```shell
+helm repo add eks https://aws.github.io/eks-charts
+```
+
+To install the chart with the release name `aws-vpc-cni` and default configuration:
+
+```shell
+$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni
+```
+
+To install into an EKS cluster where the CNI is already installed, see [this section below](#adopting-the-existing-aws-node-resources-in-an-eks-cluster)
+
+## Configuration
+
+The following table lists the configurable parameters for this chart and their default values.
+ +| Parameter | Description | Default | +| ------------------------|---------------------------------------------------------|-------------------------------------| +| `affinity` | Map of node/pod affinities | `{}` | +| `env` | List of environment variables. See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) | +| `fullnameOverride` | Override the fullname of the chart | `aws-node` | +| `image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | +| `image.tag` | Image tag | `v1.7.3` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.override` | A custom docker image to use | `nil` | +| `imagePullSecrets` | Docker registry pull secret | `[]` | +| `init.image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | +| `init.image.tag` | Image tag | `v1.7.3` | +| `init.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `init.image.override` | A custom docker image to use | `nil` | +| `init.env` | List of init container environment variables. 
See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) |
+| `init.securityContext`  | Init container Security context | `privileged: true` |
+| `originalMatchLabels`   | Use the original daemonset matchLabels | `false` |
+| `nameOverride` | Override the name of the chart | `aws-node` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `podSecurityContext` | Pod Security Context | `{}` |
+| `podAnnotations` | annotations to add to each pod | `{}` |
+| `priorityClassName` | Name of the priorityClass | `system-node-critical` |
+| `resources` | Resources for the pods | `requests.cpu: 10m` |
+| `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` |
+| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
+| `serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` |
+| `livenessProbe` | Liveness probe settings for daemonset | (see `values.yaml`) |
+| `readinessProbe` | Readiness probe settings for daemonset | (see `values.yaml`) |
+| `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` |
+| `tolerations` | Optional deployment tolerations | `[]` |
+| `updateStrategy` | Optional update strategy | `type: RollingUpdate` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters:
+
+```shell
+$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni --values values.yaml
+```
+
+## Adopting the existing aws-node resources in an EKS cluster
+
+If you do not want to delete the existing aws-node resources in your cluster that run the aws-vpc-cni and then install this helm chart, you can adopt the resources into a release instead.
This process is highlighted in this [PR comment](https://github.com/aws/eks-charts/issues/57#issuecomment-628403245). Once you have annotated and labeled all the resources this chart specifies, enable the `originalMatchLabels` flag, and also set `crd.create` to false on the helm release and run an update. If you have been careful this should not diff and leave all the resources unmodified and now under management of helm. + +Here is an example script to modify the existing resources: + +WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release. +``` +#!/usr/bin/env bash + +set -euo pipefail + +# don't import the crd. Helm cant manage the lifecycle of it anyway. +for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do + echo "setting annotations and labels on $kind/aws-node" + kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE + kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system + kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm +done +``` diff --git a/charts/aws-vpc-cni/templates/NOTES.txt b/charts/aws-vpc-cni/templates/NOTES.txt new file mode 100644 index 00000000000..9efc49f31a3 --- /dev/null +++ b/charts/aws-vpc-cni/templates/NOTES.txt @@ -0,0 +1,4 @@ + +{{ .Release.Name }} has been installed or updated. To check the status of pods, run: + +kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "aws-vpc-cni.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/charts/aws-vpc-cni/templates/_helpers.tpl b/charts/aws-vpc-cni/templates/_helpers.tpl new file mode 100644 index 00000000000..230aed77161 --- /dev/null +++ b/charts/aws-vpc-cni/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "aws-vpc-cni.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-vpc-cni.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-vpc-cni.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-vpc-cni.labels" -}} +app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} +helm.sh/chart: {{ include "aws-vpc-cni.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-vpc-cni.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-vpc-cni.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/charts/aws-vpc-cni/templates/clusterrole.yaml b/charts/aws-vpc-cni/templates/clusterrole.yaml new file mode 100644 index 00000000000..0635b5edd31 --- /dev/null +++ b/charts/aws-vpc-cni/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +rules: + - apiGroups: + - crd.k8s.amazonaws.com + resources: + - eniconfigs + verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - pods + - namespaces + verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch", "get", "update"] + - apiGroups: ["extensions"] + resources: + - '*' + verbs: ["list", "watch"] diff --git a/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/charts/aws-vpc-cni/templates/clusterrolebinding.yaml new file mode 100644 index 00000000000..5cadd1b1ca2 --- /dev/null +++ b/charts/aws-vpc-cni/templates/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "aws-vpc-cni.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "aws-vpc-cni.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/charts/aws-vpc-cni/templates/customresourcedefinition.yaml new file mode 100644 index 00000000000..bdd29e7a8bd --- /dev/null +++ b/charts/aws-vpc-cni/templates/customresourcedefinition.yaml @@ -0,0 +1,19 @@ +{{- if .Values.crd.create -}} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: eniconfigs.crd.k8s.amazonaws.com + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +spec: + scope: Cluster + group: crd.k8s.amazonaws.com + versions: + - name: v1alpha1 + served: true + storage: true + names: + plural: eniconfigs + singular: eniconfig + kind: ENIConfig +{{- end -}} diff --git a/charts/aws-vpc-cni/templates/daemonset.yaml b/charts/aws-vpc-cni/templates/daemonset.yaml new file mode 100644 index 00000000000..1c95741b211 --- /dev/null +++ b/charts/aws-vpc-cni/templates/daemonset.yaml @@ -0,0 +1,158 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +spec: + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + selector: + matchLabels: +{{- if .Values.originalMatchLabels }} + k8s-app: aws-node +{{- else }} + app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + k8s-app: aws-node + spec: + priorityClassName: "{{ .Values.priorityClassName }}" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "beta.kubernetes.io/os" + operator: In + values: + - linux + - key: "beta.kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }} + hostNetwork: true + initContainers: + - name: aws-vpc-cni-init + image: "{{- if .Values.initContainers.image.override }}{{- .Values.initContainers.image.override }}{{- else }}{{- .Values.initContainers.image.account }}.dkr.ecr.{{- .Values.initContainers.image.region }}.{{- .Values.initContainers.image.domain }}/amazon-k8s-cni-init:{{- .Values.initContainers.image.tag }}{{- end}}" + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + env: +{{- range $key, $value := .Values.initContainers.env }} + - name: {{ $key }} + value: {{ $value | quote }} +{{- end }} + securityContext: + {{- toYaml .Values.initContainers.securityContext | nindent 12 }} + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + terminationGracePeriodSeconds: 10 + tolerations: + - operator: Exists + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: aws-node + image: "{{- if .Values.image.override }}{{- .Values.image.override }}{{- else }}{{- .Values.image.account }}.dkr.ecr.{{- .Values.image.region }}.{{- .Values.image.domain }}/amazon-k8s-cni:{{- .Values.image.tag }}{{- end}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 61678 + name: metrics + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 12 }} + env: +{{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} +{{- end }} + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /host/var/log/aws-routed-eni + name: log-dir + - mountPath: /var/run/dockershim.sock + name: dockershim + - mountPath: /var/run/aws-node + name: run-dir + - mountPath: /run/xtables.lock + name: xtables-lock + volumes: + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + - name: dockershim + hostPath: + path: /var/run/dockershim.sock + - name: log-dir + hostPath: + path: /var/log/aws-routed-eni + type: DirectoryOrCreate + - name: run-dir + hostPath: + path: /var/run/aws-node + type: DirectoryOrCreate + - name: xtables-lock + hostPath: + path: /run/xtables.lock + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/aws-vpc-cni/templates/serviceaccount.yaml b/charts/aws-vpc-cni/templates/serviceaccount.yaml new file mode 100644 index 00000000000..88515669942 --- /dev/null +++ b/charts/aws-vpc-cni/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "aws-vpc-cni.serviceAccountName" . }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +{{- end -}} diff --git a/charts/aws-vpc-cni/values.yaml b/charts/aws-vpc-cni/values.yaml new file mode 100644 index 00000000000..4f46c661508 --- /dev/null +++ b/charts/aws-vpc-cni/values.yaml @@ -0,0 +1,110 @@ +# Default values for aws-vpc-cni. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# This default name override is to maintain backwards compatability with +# existing naming +nameOverride: aws-node + +initContainers: + image: + tag: v1.7.3 + region: us-west-2 + account: "602401143452" + domain: amazonaws.com + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + env: + DISABLE_TCP_EARLY_DEMUX: "false" + securityContext: + privileged: true + +image: + region: us-west-2 + account: "602401143452" + domain: amazonaws.com + tag: v1.7.3 + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + +# The CNI supports a number of environment variable settings +# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables +env: + ADDITIONAL_ENI_TAGS: "{}" + AWS_VPC_CNI_NODE_PORT_SUPPORT: "true" + AWS_VPC_ENI_MTU: "9001" + AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false" + AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false" + AWS_VPC_K8S_CNI_EXTERNALSNAT: "false" + AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log" + AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG + 
AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng" + AWS_VPC_K8S_CNI_VETHPREFIX: eni + AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log" + AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG + DISABLE_INTROSPECTION: "false" + DISABLE_METRICS: "false" + ENABLE_POD_ENI: "false" + WARM_ENI_TARGET: "1" + +# this flag enables you to use the match label that was present in the original daemonset deployed by EKS +# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release +originalMatchLabels: false + +imagePullSecrets: [] + +fullnameOverride: "aws-node" + +priorityClassName: system-node-critical + +podSecurityContext: {} + +podAnnotations: {} + +securityContext: + capabilities: + add: + - "NET_ADMIN" + +crd: + create: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +livenessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + initialDelaySeconds: 60 + +readinessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + initialDelaySeconds: 1 + +resources: + requests: + cpu: 10m + +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/charts/regions.json b/charts/regions.json new file mode 100644 index 00000000000..c31783a1807 --- /dev/null +++ b/charts/regions.json @@ -0,0 +1,22 @@ + [ + { + "ecrRegion": "us-west-2", + "ecrAccount": "602401143452", + "ecrDomain": "amazonaws.com" + }, + { + "ecrRegion": "us-gov-east-1", + "ecrAccount": "151742754352", + "ecrDomain": "amazonaws.com" + }, + { + "ecrRegion": "us-gov-west-1", + "ecrAccount": "013241004608", + "ecrDomain": "amazonaws.com" + }, + { + "ecrRegion": "cn-northwest-1", + 
"ecrAccount": "961992271922", + "ecrDomain": "amazonaws.com.cn" + } +] \ No newline at end of file diff --git a/scripts/generate-cni-yaml.sh b/scripts/generate-cni-yaml.sh new file mode 100755 index 00000000000..16bee861a27 --- /dev/null +++ b/scripts/generate-cni-yaml.sh @@ -0,0 +1,142 @@ +#!/bin/bash +set -euo pipefail + +SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" + +PLATFORM=$(uname | tr '[:upper:]' '[:lower:]') +HELM_VERSION="3.0.2" +NAMESPACE="kube-system" + +MAKEFILEPATH=$SCRIPTPATH/../Makefile +VERSION=$(make -s -f $MAKEFILEPATH version) +BUILD_DIR=$SCRIPTPATH/../build/cni-rel-yamls/$VERSION + +REGIONS_FILE=$SCRIPTPATH/../charts/regions.json +INDV_RESOURCES_DIR=$BUILD_DIR/individual-resources +CNI_TAR_RESOURCES_FILE=$BUILD_DIR/cni_individual-resources.tar +METRICS_TAR_RESOURCES_FILE=$BUILD_DIR/cni_metrics_individual-resources.tar +CALICO_TAR_RESOURCES_FILE=$BUILD_DIR/calico_individual-resources.tar +CNI_RESOURCES_YAML=$BUILD_DIR/aws-vpc-cni +METRICS_RESOURCES_YAML=$BUILD_DIR/cni-metrics-helper +CALICO_RESOURCES_YAML=$BUILD_DIR/calico.yaml +mkdir -p $INDV_RESOURCES_DIR + + +USAGE=$(cat << 'EOM' + Usage: generate-cni-yaml [-n ] + Generates the kubernetes yaml resource files from the helm chart + and places them into the build dir. + Example: generate-cni-yaml -n kube-system + Optional: + -n Kubernetes namespace +EOM +) + +# Process our input arguments +while getopts "vn:" opt; do + case ${opt} in + n ) # K8s namespace + NAMESPACE=$OPTARG + ;; + v ) # Verbose + set -x + ;; + \? ) + echo "$USAGE" 1>&2 + exit + ;; + esac +done + +curl -L https://get.helm.sh/helm-v$HELM_VERSION-$PLATFORM-amd64.tar.gz | tar zxf - -C $BUILD_DIR +mv $BUILD_DIR/$PLATFORM-amd64/helm $BUILD_DIR/. 
+rm -rf $BUILD_DIR/$PLATFORM-amd64 +chmod +x $BUILD_DIR/helm + +jq -c '.[]' $REGIONS_FILE | while read i; do + ecrRegion=`echo $i | jq '.ecrRegion' -r` + ecrAccount=`echo $i | jq '.ecrAccount' -r` + ecrDomain=`echo $i | jq '.ecrDomain' -r` + + if [ "$ecrRegion" = "us-west-2" ]; then + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}.yaml" + elif [ "$ecrRegion" = "cn-northwest-1" ]; then + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}-cn.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}-cn.yaml" + else + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}-${ecrRegion}.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}-${ecrRegion}.yaml" + fi + $BUILD_DIR/helm template charts/aws-vpc-cni \ + --set initContainers.image.region=$ecrRegion,\ + initContainers.image.account=$ecrAccount,\ + initContainers.image.domain=$ecrDomain,\ + image.region=$ecrRegion,\ + image.account=$ecrAccount,\ + image.domain=$ecrDomain \ + --namespace $NAMESPACE \ + $SCRIPTPATH/../charts/aws-vpc-cni > $NEW_CNI_RESOURCES_YAML + cat $NEW_CNI_RESOURCES_YAML | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $NEW_CNI_RESOURCES_YAML + + $BUILD_DIR/helm template charts/cni-metrics-helper \ + --set initContainers.image.region=$ecrRegion,\ + initContainers.image.account=$ecrAccount,\ + initContainers.image.domain=$ecrDomain,\ + image.region=$ecrRegion,\ + image.account=$ecrAccount,\ + image.domain=$ecrDomain \ + --namespace $NAMESPACE \ + $SCRIPTPATH/../charts/cni-metrics-helper > $NEW_METRICS_RESOURCES_YAML + cat $NEW_METRICS_RESOURCES_YAML | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $NEW_METRICS_RESOURCES_YAML +done + +$BUILD_DIR/helm template charts/aws-calico \ + --namespace $NAMESPACE \ + $SCRIPTPATH/../charts/aws-calico > 
$CALICO_RESOURCES_YAML + cat $CALICO_RESOURCES_YAML | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $CALICO_RESOURCES_YAML + +$BUILD_DIR/helm template \ + --namespace $NAMESPACE \ + --output-dir $INDV_RESOURCES_DIR/ \ + $SCRIPTPATH/../charts/aws-vpc-cni/ + +for i in $INDV_RESOURCES_DIR/aws-vpc-cni/templates/*; do + cat $i | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $i +done + +$BUILD_DIR/helm template \ + --namespace $NAMESPACE \ + --output-dir $INDV_RESOURCES_DIR/ \ + $SCRIPTPATH/../charts/cni-metrics-helper/ + +for i in $INDV_RESOURCES_DIR/cni-metrics-helper/templates/*; do + cat $i | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $i +done + +$BUILD_DIR/helm template \ + --namespace $NAMESPACE \ + --output-dir $INDV_RESOURCES_DIR/ \ + $SCRIPTPATH/../charts/aws-calico/ + +for i in $INDV_RESOURCES_DIR/aws-calico/templates/*; do + cat $i | grep -v 'helm.sh\|app.kubernetes.io/managed-by: Helm' > $BUILD_DIR/helm_annotations_removed.yaml + mv $BUILD_DIR/helm_annotations_removed.yaml $i +done + +cd $INDV_RESOURCES_DIR/aws-vpc-cni/ && tar cvf $CNI_TAR_RESOURCES_FILE templates/* +cd $INDV_RESOURCES_DIR/cni-metrics-helper/ && tar cvf $METRICS_TAR_RESOURCES_FILE templates/* +cd $INDV_RESOURCES_DIR/aws-calico/ && tar cvf $CALICO_TAR_RESOURCES_FILE templates/* +cd $SCRIPTPATH + +echo "Generated aws-vpc-cni and cni-metrics-helper kubernetes yaml resources files in:" +echo " - $CNI_RESOURCES_YAML" +echo " - $METRICS_RESOURCES_YAML" +echo " - $CNI_TAR_RESOURCES_FILE" +echo " - $METRICS_TAR_RESOURCES_FILE" +echo " - $CALICO_TAR_RESOURCES_FILE" \ No newline at end of file diff --git a/scripts/upload-resources-to-github.sh b/scripts/upload-resources-to-github.sh new file mode 100755 index 
00000000000..964b0ac33d8
--- /dev/null
+++ b/scripts/upload-resources-to-github.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+set -euo pipefail
+
+# Script to upload release assets to GitHub.
+# This script cleans up after itself in cases of partial failures. i.e. either all assets are uploaded or none
+SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
+VERSION=$(make -s -f $SCRIPTPATH/../Makefile version)
+BUILD_DIR=$SCRIPTPATH/../build/cni-rel-yamls/$VERSION
+BINARY_DIR=$SCRIPTPATH/../build/bin
+CNI_TAR_RESOURCES_FILE=$BUILD_DIR/cni_individual-resources.tar
+METRICS_TAR_RESOURCES_FILE=$BUILD_DIR/cni_metrics_individual-resources.tar
+CALICO_TAR_RESOURCES_FILE=$BUILD_DIR/calico_individual-resources.tar
+CNI_RESOURCES_YAML=$BUILD_DIR/aws-vpc-cni
+METRICS_RESOURCES_YAML=$BUILD_DIR/cni-metrics-helper
+CALICO_RESOURCES_YAML=$BUILD_DIR/calico.yaml
+REGIONS_FILE=$SCRIPTPATH/../charts/regions.json
+
+BINARIES_ONLY="false"
+
+USAGE=$(cat << 'EOM'
+ Usage: upload-resources-to-github [-b]
+ Upload release assets to GitHub
+
+ Example: upload-resources-to-github -b
+ Optional:
+   -b   Upload binaries only [DEFAULT: upload all the assets]
+EOM
+)
+
+# Process our input arguments
+while getopts "b" opt; do
+  case ${opt} in
+    b ) # Binaries only
+        BINARIES_ONLY="true"
+      ;;
+    \? )
+        echo "$USAGE" 1>&2
+        exit
+      ;;
+  esac
+done
+
+RELEASE_ID=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \
+  https://api.github.com/repos/aws/amazon-vpc-cni-k8s/releases | \
+  jq --arg VERSION "$VERSION" '.[] | select(.tag_name==$VERSION) | .id')
+
+ASSET_IDS_UPLOADED=()
+
+trap 'handle_errors_and_cleanup $?'
EXIT + +handle_errors_and_cleanup() { + if [ $1 -eq 0 ]; then + exit 0 + fi + + if [[ ${#ASSET_IDS_UPLOADED[@]} -ne 0 ]]; then + echo -e "\nCleaning up assets uploaded in the current execution of the script" + for asset_id in "${ASSET_IDS_UPLOADED[@]}"; do + echo "Deleting asset $asset_id" + curl -X DELETE \ + -H "Authorization: token $GITHUB_TOKEN" \ + "https://api.github.com/repos/aws/amazon-vpc-cni-k8s/releases/assets/$asset_id" + done + exit $1 + fi +} + +# $1: absolute path to asset +upload_asset() { + resp=$(curl --write-out '%{http_code}' --silent \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: $(file -b --mime-type $1)" \ + --data-binary @$1 \ + "https://uploads.github.com/repos/aws/amazon-vpc-cni-k8s/releases/$RELEASE_ID/assets?name=$(basename $1)") + + response_code=$(echo $resp | sed 's/\(.*\)}//') + response_content=$(echo $resp | sed "s/$response_code//") + + # HTTP success code expected - 201 Created + if [[ $response_code -eq 201 ]]; then + asset_id=$(echo $response_content | jq '.id') + ASSET_IDS_UPLOADED+=("$asset_id") + echo "Created asset ID $asset_id successfully" + else + echo -e "❌ Upload failed with response code $response_code and message \n$response_content ❌" + exit 1 + fi +} + +jq -c '.[]' $REGIONS_FILE | while read i; do + ecrRegion=`echo $i | jq '.ecrRegion' -r` + ecrAccount=`echo $i | jq '.ecrAccount' -r` + ecrDomain=`echo $i | jq '.ecrDomain' -r` + + if [ "$ecrRegion" = "us-west-2" ]; then + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}.yaml" + elif [ "$ecrRegion" = "cn-northwest-1" ]; then + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}-cn.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}-cn.yaml" + else + NEW_CNI_RESOURCES_YAML="${CNI_RESOURCES_YAML}-${ecrRegion}.yaml" + NEW_METRICS_RESOURCES_YAML="${METRICS_RESOURCES_YAML}-${ecrRegion}.yaml" + fi + RESOURCES_TO_UPLOAD=("$NEW_CNI_RESOURCES_YAML" "$NEW_METRICS_RESOURCES_YAML") + + COUNT=1 + 
echo -e "\nUploading release assets for release id '$RELEASE_ID' to Github" + for asset in ${RESOURCES_TO_UPLOAD[@]}; do + name=$(echo $asset | tr '/' '\n' | tail -1) + echo -e "\n $((COUNT++)). $name" + upload_asset $asset + done +done + +RESOURCES_TO_UPLOAD=("$CALICO_RESOURCES_YAML" "$CNI_TAR_RESOURCES_FILE" "$METRICS_TAR_RESOURCES_FILE" "$CALICO_TAR_RESOURCES_FILE") + +COUNT=1 +echo -e "\nUploading release assets for release id '$RELEASE_ID' to Github" +for asset in ${RESOURCES_TO_UPLOAD[@]}; do + name=$(echo $asset | tr '/' '\n' | tail -1) + echo -e "\n $((COUNT++)). $name" + upload_asset $asset +done