diff --git a/cluster/eksctl/ipv4/cluster.yaml b/cluster/eksctl/ipv4/cluster.yaml new file mode 100644 index 000000000..33a636693 --- /dev/null +++ b/cluster/eksctl/ipv4/cluster.yaml @@ -0,0 +1,42 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +availabilityZones: +- ${AWS_REGION}a +- ${AWS_REGION}b +- ${AWS_REGION}c + +metadata: + name: ${EKS_CLUSTER_NAME} + region: ${AWS_REGION} + version: '1.25' + tags: + karpenter.sh/discovery: ${EKS_CLUSTER_NAME} + created-by: eks-workshop-v2 + env: ${EKS_CLUSTER_NAME} + +iam: + withOIDC: true + +vpc: + cidr: 10.42.0.0/16 + clusterEndpoints: + privateAccess: true + publicAccess: true + +addons: +- name: vpc-cni + version: v1.12.5-eksbuild.2 + configurationValues: "{\"env\":{\"ENABLE_PREFIX_DELEGATION\":\"true\", \"ENABLE_POD_ENI\":\"true\", \"POD_SECURITY_GROUP_ENFORCING_MODE\":\"standard\"}}" + resolveConflicts: overwrite + +managedNodeGroups: +- name: default + desiredCapacity: 3 + minSize: 3 + maxSize: 6 + instanceType: m5.large + privateNetworking: true + releaseVersion: 1.25.6-20230304 + labels: + workshop-default: 'yes' diff --git a/cluster/eksctl/ipv6/cluster.yaml b/cluster/eksctl/ipv6/cluster.yaml new file mode 100644 index 000000000..3aeef2fa2 --- /dev/null +++ b/cluster/eksctl/ipv6/cluster.yaml @@ -0,0 +1,43 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +availabilityZones: +- ${AWS_REGION}a +- ${AWS_REGION}b +- ${AWS_REGION}c + +metadata: + name: ${EKS_CLUSTER_NAME} + region: ${AWS_REGION} + version: '1.25' + tags: + karpenter.sh/discovery: ${EKS_CLUSTER_NAME} + created-by: eks-workshop-v2 + env: ${EKS_CLUSTER_NAME} + +iam: + withOIDC: true + +kubernetesNetworkConfig: + ipFamily: IPv6 + +vpc: + clusterEndpoints: + privateAccess: true + publicAccess: true + +addons: +- name: vpc-cni +- name: kube-proxy +- name: coredns + +managedNodeGroups: +- name: default + desiredCapacity: 3 + minSize: 3 + maxSize: 6 + instanceType: m5.large + privateNetworking: true + releaseVersion: 1.25.6-20230304 + labels: + workshop-default: 'yes' diff --git a/hack/run-tests.sh b/hack/run-tests.sh index 3d97b3e46..9afb29bfa 100755 --- a/hack/run-tests.sh +++ b/hack/run-tests.sh @@ -49,10 +49,18 @@ if [ ! -z "$BACKGROUND" ]; then background_args="--detach" fi +network_family=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --query "cluster.kubernetesNetworkConfig.ipFamily" --output text) + +skip_tags_args="--skip-tags ipv6" + +if [[ "$network_family" == "ipv6" ]]; then + skip_tags_args="--skip-tags ipv4" +fi + echo "Running test suite..." $CONTAINER_CLI run $background_args \ -v $SCRIPT_DIR/../website/docs:/content \ -v $SCRIPT_DIR/../manifests:/manifests \ -e 'EKS_CLUSTER_NAME' -e 'AWS_REGION' \ - $aws_credential_args $container_image -g "{$module,$module/**}" --hook-timeout 1200 --timeout 1200 ${AWS_EKS_WORKSHOP_TEST_FLAGS} + $aws_credential_args $container_image -g "{$module,$module/**}" --hook-timeout 1200 --timeout 1200 $skip_tags_args --debug ${AWS_EKS_WORKSHOP_TEST_FLAGS} diff --git a/lab/bin/reset-environment b/lab/bin/reset-environment index dc2134584..38fc77d99 100644 --- a/lab/bin/reset-environment +++ b/lab/bin/reset-environment @@ -82,6 +82,7 @@ mkdir -p /eks-workshop/terraform cp $manifests_path/.workshop/terraform/base.tf /eks-workshop/terraform export TF_VAR_eks_cluster_id="$EKS_CLUSTER_NAME" +export TF_VAR_eks_network_family="$EKS_IP_FAMILY" RESOURCES_PRECREATED=${RESOURCES_PRECREATED:-""} @@ -117,7 +118,9 @@ if [ ! 
-z "$module" ]; then terraform -chdir="$tf_dir" apply -refresh=false --auto-approve > /tmp/terraform-apply.log fi - if [ -d "$module_path/.workshop/manifests" ]; then + if [ -d "$module_path/.workshop/manifests/$EKS_IP_FAMILY" ]; then + kubectl apply -k "$module_path/.workshop/manifests/$EKS_IP_FAMILY" > /dev/null + elif [ -d "$module_path/.workshop/manifests" ]; then kubectl apply -k "$module_path/.workshop/manifests" > /dev/null fi fi diff --git a/lab/bin/use-cluster b/lab/bin/use-cluster index 2b2849e5d..e3820b47d 100644 --- a/lab/bin/use-cluster +++ b/lab/bin/use-cluster @@ -22,6 +22,8 @@ fi default_nodegroup_name=$(echo "$nodegroup_output" | jq '.nodegroups[0]') +network_family=$(aws eks describe-cluster --name $cluster_name --query "cluster.kubernetesNetworkConfig.ipFamily" --output text) + cat << EOT > /home/ec2-user/.bashrc.d/env.bash aws eks update-kubeconfig --name ${cluster_name} set -a @@ -31,7 +33,7 @@ EKS_DEFAULT_MNG_MIN=3 EKS_DEFAULT_MNG_MAX=6 EKS_DEFAULT_MNG_DESIRED=3 AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID} -EKS_IP_FAMILY=ipv4 +EKS_IP_FAMILY=${network_family} set +a EOT diff --git a/lab/bin/wait-for-lb b/lab/bin/wait-for-lb index 5418d5398..2fe5b5577 100644 --- a/lab/bin/wait-for-lb +++ b/lab/bin/wait-for-lb @@ -9,7 +9,7 @@ echo "Waiting for ${host}..." EXIT_CODE=0 timeout -s TERM 600 bash -c \ - 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${host}/home)" != "200" ]];\ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${host})" != "200" ]];\ do sleep 5;\ done' || EXIT_CODE=$? diff --git a/manifests/.workshop/terraform/base.tf b/manifests/.workshop/terraform/base.tf index 57441f153..9a1f95251 100644 --- a/manifests/.workshop/terraform/base.tf +++ b/manifests/.workshop/terraform/base.tf @@ -37,6 +37,11 @@ variable "resources_precreated" { default = false } +variable "eks_network_family" { + type = string + default = "ipv4" +} + data "aws_partition" "current" {} data "aws_caller_identity" "current" {} data "aws_region" "current" {} @@ -81,6 +86,9 @@ locals { eks_cluster_endpoint = data.aws_eks_cluster.eks_cluster.endpoint eks_cluster_version = data.aws_eks_cluster.eks_cluster.version + is_ipv4 = var.eks_network_family == "ipv4" + is_ipv6 = ! 
local.is_ipv4 + addon_context = { aws_caller_identity_account_id = data.aws_caller_identity.current.account_id aws_caller_identity_arn = data.aws_caller_identity.current.arn diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/manifests/kustomization.yaml b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/base/kustomization.yaml similarity index 100% rename from manifests/modules/automation/controlplanes/ack/.workshop/manifests/kustomization.yaml rename to manifests/modules/automation/controlplanes/ack/.workshop/manifests/base/kustomization.yaml diff --git a/manifests/modules/automation/controlplanes/ack/rds/application/nlb.yaml b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/base/nlb.yaml similarity index 97% rename from manifests/modules/automation/controlplanes/ack/rds/application/nlb.yaml rename to manifests/modules/automation/controlplanes/ack/.workshop/manifests/base/nlb.yaml index e56524651..91b5a0cad 100644 --- a/manifests/modules/automation/controlplanes/ack/rds/application/nlb.yaml +++ b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/base/nlb.yaml @@ -5,7 +5,7 @@ metadata: annotations: service.beta.kubernetes.io/aws-load-balancer-type: external service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip namespace: ui spec: type: LoadBalancer diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv4/kustomization.yaml b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv4/kustomization.yaml new file mode 100644 index 000000000..2df98b936 --- /dev/null +++ b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv4/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/kustomization.yaml b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/kustomization.yaml new file mode 100644 index 000000000..cac42e686 --- /dev/null +++ b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base +patches: +- nlb.yaml diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/nlb.yaml b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/nlb.yaml new file mode 100644 index 000000000..43f748596 --- /dev/null +++ b/manifests/modules/automation/controlplanes/ack/.workshop/manifests/ipv6/nlb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + namespace: ui + annotations: + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" \ No newline at end of file diff --git a/manifests/modules/automation/controlplanes/ack/rds/application/deployment.yaml b/manifests/modules/automation/controlplanes/ack/rds/application/deployment.yaml index fa2858dd0..273f9713b 100644 --- a/manifests/modules/automation/controlplanes/ack/rds/application/deployment.yaml +++ b/manifests/modules/automation/controlplanes/ack/rds/application/deployment.yaml @@ -28,4 +28,4 @@ spec: valueFrom: secretKeyRef: name: catalog-db-ack - key: endpoint + key: endpoint \ No newline at end of file diff --git 
a/manifests/modules/automation/controlplanes/ack/rds/application/kustomization.yaml b/manifests/modules/automation/controlplanes/ack/rds/application/kustomization.yaml index ba5518880..502f94990 100644 --- a/manifests/modules/automation/controlplanes/ack/rds/application/kustomization.yaml +++ b/manifests/modules/automation/controlplanes/ack/rds/application/kustomization.yaml @@ -2,7 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization bases: - ../../../../../../base-application/catalog -resources: -- nlb.yaml patches: -- deployment.yaml +- deployment.yaml \ No newline at end of file diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/kustomization.yaml b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/base/kustomization.yaml similarity index 100% rename from manifests/modules/networking/vpc-lattice/.workshop/manifests/kustomization.yaml rename to manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/base/kustomization.yaml diff --git a/manifests/modules/automation/controlplanes/crossplane/application/nlb.yaml b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/base/nlb.yaml similarity index 97% rename from manifests/modules/automation/controlplanes/crossplane/application/nlb.yaml rename to manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/base/nlb.yaml index e56524651..91b5a0cad 100644 --- a/manifests/modules/automation/controlplanes/crossplane/application/nlb.yaml +++ b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/base/nlb.yaml @@ -5,7 +5,7 @@ metadata: annotations: service.beta.kubernetes.io/aws-load-balancer-type: external service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip namespace: ui spec: type: LoadBalancer diff --git a/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv4/kustomization.yaml b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv4/kustomization.yaml new file mode 100644 index 000000000..2df98b936 --- /dev/null +++ b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv4/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base diff --git a/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/kustomization.yaml b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/kustomization.yaml new file mode 100644 index 000000000..cac42e686 --- /dev/null +++ b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base +patches: +- nlb.yaml diff --git a/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/nlb.yaml b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/nlb.yaml new file mode 100644 index 000000000..43f748596 --- /dev/null +++ b/manifests/modules/automation/controlplanes/crossplane/.workshop/manifests/ipv6/nlb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + namespace: ui + annotations: + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" \ No newline at end of file diff --git 
a/manifests/modules/automation/controlplanes/crossplane/application/deployment.yaml b/manifests/modules/automation/controlplanes/crossplane/application/deployment.yaml index 615dd4683..4b0e2ce18 100644 --- a/manifests/modules/automation/controlplanes/crossplane/application/deployment.yaml +++ b/manifests/modules/automation/controlplanes/crossplane/application/deployment.yaml @@ -28,4 +28,4 @@ spec: valueFrom: secretKeyRef: name: catalog-db-crossplane - key: endpoint + key: endpoint \ No newline at end of file diff --git a/manifests/modules/automation/controlplanes/crossplane/application/kustomization.yaml b/manifests/modules/automation/controlplanes/crossplane/application/kustomization.yaml index 49d186779..08831d2fb 100644 --- a/manifests/modules/automation/controlplanes/crossplane/application/kustomization.yaml +++ b/manifests/modules/automation/controlplanes/crossplane/application/kustomization.yaml @@ -2,7 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization bases: - ../../../../base-application/catalog -resources: -- nlb.yaml patches: -- deployment.yaml +- deployment.yaml \ No newline at end of file diff --git a/manifests/modules/automation/controlplanes/crossplane/compositions/application/deployment.yaml b/manifests/modules/automation/controlplanes/crossplane/compositions/application/deployment.yaml deleted file mode 100644 index b3ca68b7c..000000000 --- a/manifests/modules/automation/controlplanes/crossplane/compositions/application/deployment.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: catalog - namespace: catalog -spec: - template: - spec: - containers: - - name: catalog - env: - - name: DB_USER - valueFrom: - secretKeyRef: - name: catalog-db-composition - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: catalog-db-composition - key: password - - name: DB_READ_ENDPOINT - valueFrom: - secretKeyRef: - name: catalog-db-composition - key: endpoint - - name: DB_ENDPOINT - valueFrom: - secretKeyRef: - name: catalog-db-composition - key: endpoint diff --git a/manifests/modules/exposing/ingress/creating-ingress/ingress.yaml b/manifests/modules/exposing/ingress/creating-ingress/ipv4/ingress.yaml similarity index 100% rename from manifests/modules/exposing/ingress/creating-ingress/ingress.yaml rename to manifests/modules/exposing/ingress/creating-ingress/ipv4/ingress.yaml diff --git a/manifests/modules/exposing/ingress/creating-ingress/kustomization.yaml b/manifests/modules/exposing/ingress/creating-ingress/ipv4/kustomization.yaml similarity index 100% rename from manifests/modules/exposing/ingress/creating-ingress/kustomization.yaml rename to manifests/modules/exposing/ingress/creating-ingress/ipv4/kustomization.yaml diff --git a/manifests/modules/exposing/ingress/creating-ingress/ipv6/ingress.yaml b/manifests/modules/exposing/ingress/creating-ingress/ipv6/ingress.yaml new file mode 100644 index 000000000..0f61a6edf --- /dev/null +++ b/manifests/modules/exposing/ingress/creating-ingress/ipv6/ingress.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ui + namespace: ui + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/healthcheck-path: /actuator/health/liveness + # HIGHLIGHT + alb.ingress.kubernetes.io/ip-address-type: dualstack +spec: + ingressClassName: alb + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ui + port: + number: 80 
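The `ipv4` and `ipv6` variants introduced in this change are plain kustomize directories, so they can be rendered locally before any lab command applies them. A minimal sketch, assuming the repository root as the working directory; the grep output simply echoes the annotation from the manifest above:

```bash
# Render the IPv6 ingress overlay without applying it, to confirm the
# dualstack annotation survives the kustomize build
$ kubectl kustomize manifests/modules/exposing/ingress/creating-ingress/ipv6 | grep ip-address-type
    alb.ingress.kubernetes.io/ip-address-type: dualstack
```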
diff --git a/manifests/modules/exposing/ingress/creating-ingress/ipv6/kustomization.yaml b/manifests/modules/exposing/ingress/creating-ingress/ipv6/kustomization.yaml new file mode 100644 index 000000000..69c9e22ed --- /dev/null +++ b/manifests/modules/exposing/ingress/creating-ingress/ipv6/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +resources: +- ingress.yaml diff --git a/manifests/modules/exposing/ingress/multiple-ingress/ingress-catalog.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-catalog.yaml similarity index 100% rename from manifests/modules/exposing/ingress/multiple-ingress/ingress-catalog.yaml rename to manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-catalog.yaml diff --git a/manifests/modules/exposing/ingress/multiple-ingress/ingress-ui.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-ui.yaml similarity index 100% rename from manifests/modules/exposing/ingress/multiple-ingress/ingress-ui.yaml rename to manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-ui.yaml diff --git a/manifests/modules/exposing/ingress/multiple-ingress/kustomization.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv4/kustomization.yaml similarity index 100% rename from manifests/modules/exposing/ingress/multiple-ingress/kustomization.yaml rename to manifests/modules/exposing/ingress/multiple-ingress/ipv4/kustomization.yaml diff --git a/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-catalog.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-catalog.yaml new file mode 100644 index 000000000..e8e7fed30 --- /dev/null +++ b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-catalog.yaml @@ -0,0 +1,24 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: catalog + namespace: catalog + labels: + app.kubernetes.io/created-by: eks-workshop + annotations: + alb.ingress.kubernetes.io/target-type: ip + # HIGHLIGHT + alb.ingress.kubernetes.io/group.name: retail-app-group + alb.ingress.kubernetes.io/ip-address-type: dualstack +spec: + ingressClassName: alb + rules: + - http: + paths: + - path: /catalogue + pathType: Prefix + backend: + service: + name: catalog + port: + number: 80 diff --git a/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-ui.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-ui.yaml new file mode 100644 index 000000000..4097faa09 --- /dev/null +++ b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-ui.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ui + namespace: ui + labels: + app.kubernetes.io/created-by: eks-workshop + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/healthcheck-path: /actuator/health/liveness + # HIGHLIGHT + alb.ingress.kubernetes.io/group.name: retail-app-group + alb.ingress.kubernetes.io/ip-address-type: dualstack +spec: + ingressClassName: alb + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ui + port: + number: 80 diff --git a/manifests/modules/exposing/ingress/multiple-ingress/ipv6/kustomization.yaml b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/kustomization.yaml new file mode 100644 index 000000000..1d124e8cb --- /dev/null +++ b/manifests/modules/exposing/ingress/multiple-ingress/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +resources: +- ingress-catalog.yaml +- ingress-ui.yaml diff --git a/manifests/modules/exposing/load-balancer/ip-mode/kustomization.yaml b/manifests/modules/exposing/load-balancer/ipv4/ip-mode/kustomization.yaml similarity index 100% rename from manifests/modules/exposing/load-balancer/ip-mode/kustomization.yaml rename to manifests/modules/exposing/load-balancer/ipv4/ip-mode/kustomization.yaml diff --git a/manifests/modules/exposing/load-balancer/ip-mode/nlb.yaml b/manifests/modules/exposing/load-balancer/ipv4/ip-mode/nlb.yaml similarity index 100% rename from manifests/modules/exposing/load-balancer/ip-mode/nlb.yaml rename to manifests/modules/exposing/load-balancer/ipv4/ip-mode/nlb.yaml diff --git a/manifests/modules/exposing/load-balancer/nlb/kustomization.yaml b/manifests/modules/exposing/load-balancer/ipv4/nlb/kustomization.yaml similarity index 100% rename from manifests/modules/exposing/load-balancer/nlb/kustomization.yaml rename to manifests/modules/exposing/load-balancer/ipv4/nlb/kustomization.yaml diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/manifests/nlb.yaml b/manifests/modules/exposing/load-balancer/ipv4/nlb/nlb.yaml similarity index 100% rename from manifests/modules/automation/controlplanes/ack/.workshop/manifests/nlb.yaml rename to manifests/modules/exposing/load-balancer/ipv4/nlb/nlb.yaml diff --git a/manifests/modules/exposing/load-balancer/ipv6/ip-mode/kustomization.yaml b/manifests/modules/exposing/load-balancer/ipv6/ip-mode/kustomization.yaml new file mode 100644 index 000000000..c707829c1 --- /dev/null +++ b/manifests/modules/exposing/load-balancer/ipv6/ip-mode/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../nlb +patches: +- nlb.yaml diff --git a/manifests/modules/exposing/load-balancer/ipv6/ip-mode/nlb.yaml b/manifests/modules/exposing/load-balancer/ipv6/ip-mode/nlb.yaml new file mode 100644 index 000000000..dd3720b20 --- /dev/null +++ b/manifests/modules/exposing/load-balancer/ipv6/ip-mode/nlb.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + annotations: + # HIGHLIGHT + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + namespace: ui diff --git a/manifests/modules/automation/controlplanes/crossplane/compositions/application/kustomization.yaml b/manifests/modules/exposing/load-balancer/ipv6/nlb/kustomization.yaml similarity index 56% rename from manifests/modules/automation/controlplanes/crossplane/compositions/application/kustomization.yaml rename to manifests/modules/exposing/load-balancer/ipv6/nlb/kustomization.yaml index ba5518880..102d3670e 100644 --- a/manifests/modules/automation/controlplanes/crossplane/compositions/application/kustomization.yaml +++ b/manifests/modules/exposing/load-balancer/ipv6/nlb/kustomization.yaml @@ -1,8 +1,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization bases: -- ../../../../../../base-application/catalog resources: - nlb.yaml -patches: -- deployment.yaml diff --git a/manifests/modules/exposing/load-balancer/ipv6/nlb/nlb.yaml b/manifests/modules/exposing/load-balancer/ipv6/nlb/nlb.yaml new file mode 100644 index 000000000..ff3094043 --- /dev/null +++ b/manifests/modules/exposing/load-balancer/ipv6/nlb/nlb.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + 
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance + # HIGHLIGHT + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" + namespace: ui +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 8080 + name: http + selector: + app.kubernetes.io/name: ui + app.kubernetes.io/instance: ui + app.kubernetes.io/component: service diff --git a/manifests/modules/security/irsa/.workshop/manifests/kustomization.yaml b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/base/kustomization.yaml similarity index 100% rename from manifests/modules/security/irsa/.workshop/manifests/kustomization.yaml rename to manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/base/kustomization.yaml diff --git a/manifests/modules/exposing/load-balancer/nlb/nlb.yaml b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/base/nlb.yaml similarity index 97% rename from manifests/modules/exposing/load-balancer/nlb/nlb.yaml rename to manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/base/nlb.yaml index e56524651..91b5a0cad 100644 --- a/manifests/modules/exposing/load-balancer/nlb/nlb.yaml +++ b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/base/nlb.yaml @@ -5,7 +5,7 @@ metadata: annotations: service.beta.kubernetes.io/aws-load-balancer-type: external service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip namespace: ui spec: type: LoadBalancer diff --git a/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv4/kustomization.yaml b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv4/kustomization.yaml new file mode 100644 index 000000000..2df98b936 --- /dev/null +++ b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv4/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base diff --git a/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/kustomization.yaml b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/kustomization.yaml new file mode 100644 index 000000000..cac42e686 --- /dev/null +++ b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base +patches: +- nlb.yaml diff --git a/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/nlb.yaml b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/nlb.yaml new file mode 100644 index 000000000..43f748596 --- /dev/null +++ b/manifests/modules/networking/securitygroups-for-pods/.workshop/manifests/ipv6/nlb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + namespace: ui + annotations: + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" \ No newline at end of file diff --git a/manifests/modules/networking/securitygroups-for-pods/rds/kustomization.yaml b/manifests/modules/networking/securitygroups-for-pods/rds/kustomization.yaml index 4a00eaee8..aaec85be3 100644 --- a/manifests/modules/networking/securitygroups-for-pods/rds/kustomization.yaml +++ 
b/manifests/modules/networking/securitygroups-for-pods/rds/kustomization.yaml @@ -24,7 +24,5 @@ vars: patches: - catalog-configMap.yaml - secrets.yaml -resources: -- nlb.yaml configurations: - configuration.yaml diff --git a/manifests/modules/networking/securitygroups-for-pods/rds/nlb.yaml b/manifests/modules/networking/securitygroups-for-pods/rds/nlb.yaml deleted file mode 100644 index e56524651..000000000 --- a/manifests/modules/networking/securitygroups-for-pods/rds/nlb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ui-nlb - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance - namespace: ui -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 8080 - name: http - selector: - app.kubernetes.io/name: ui - app.kubernetes.io/instance: ui - app.kubernetes.io/component: service diff --git a/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh b/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh index 1ec57e918..cb9a64f11 100644 --- a/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh +++ b/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh @@ -25,29 +25,22 @@ if [ ! -z "$helm_check" ]; then helm delete gateway-api-controller --namespace gateway-api-controller > /dev/null fi -#echo "Deleting VPC Lattice target groups..." - -#tg1=$(aws vpc-lattice list-target-groups --query "items[?name=='k8s-checkout-checkout'].id" --output text) -# -#if [ ! -z "$tg1" ]; then -# for id in $(aws vpc-lattice list-targets --target-group-identifier $tg1 --query 'items[].id' --output text); do -# aws vpc-lattice deregister-targets --target-group-identifier $tg1 --targets id=$id,port=8080 > /dev/null -# done -# -# aws vpc-lattice delete-target-group --target-group-identifier $tg1 > /dev/null -#fi -# -#tg2=$(aws vpc-lattice list-target-groups --query "items[?name=='k8s-checkout-checkoutv2'].id" --output text) -# -#if [ ! -z "$tg2" ]; then -# for id in $(aws vpc-lattice list-targets --target-group-identifier $tg2 --query 'items[].id' --output text); do -# aws vpc-lattice deregister-targets --target-group-identifier $tg2 --targets id=$id,port=8080 > /dev/null -# done -# -# aws vpc-lattice delete-target-group --target-group-identifier $tg2 > /dev/null -#fi - -PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) -MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') CLUSTER_SG=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --output json| jq -r '.cluster.resourcesVpcConfig.clusterSecurityGroupId') -aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --cidr $MANAGED_PREFIX --protocol -1 > /dev/null \ No newline at end of file + +IPV4_PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) +IPV4_MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $IPV4_PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') + +ipv4_sg_check=$(aws ec2 describe-security-group-rules --filters Name="group-id",Values="$CLUSTER_SG" --query "SecurityGroupRules[?CidrIpv4=='$IPV4_MANAGED_PREFIX'].SecurityGroupRuleId" --output text) + +if [ ! 
-z "$ipv4_sg_check" ]; then + aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions IpProtocol=-1,IpRanges=[{CidrIp=$IPV4_MANAGED_PREFIX}] > /dev/null +fi + +IPV6_PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.ipv6.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) +IPV6_MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $IPV6_PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') + +ipv6_sg_check=$(aws ec2 describe-security-group-rules --filters Name="group-id",Values="$CLUSTER_SG" --query "SecurityGroupRules[?CidrIpv6=='$IPV6_MANAGED_PREFIX'].SecurityGroupRuleId" --output text) + +if [ ! -z "$ipv6_sg_check" ]; then + aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions IpProtocol=-1,Ipv6Ranges=[{CidrIpv6=$IPV6_MANAGED_PREFIX}] > /dev/null +fi diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/base/kustomization.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/base/kustomization.yaml new file mode 100644 index 000000000..b430d0f3a --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/.workshop/manifests/base/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- nlb.yaml diff --git a/manifests/modules/automation/controlplanes/crossplane/compositions/application/nlb.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/base/nlb.yaml similarity index 97% rename from manifests/modules/automation/controlplanes/crossplane/compositions/application/nlb.yaml rename to manifests/modules/networking/vpc-lattice/.workshop/manifests/base/nlb.yaml index e56524651..91b5a0cad 100644 --- a/manifests/modules/automation/controlplanes/crossplane/compositions/application/nlb.yaml +++ b/manifests/modules/networking/vpc-lattice/.workshop/manifests/base/nlb.yaml @@ -5,7 +5,7 @@ metadata: annotations: service.beta.kubernetes.io/aws-load-balancer-type: external service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip namespace: ui spec: type: LoadBalancer diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv4/kustomization.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv4/kustomization.yaml new file mode 100644 index 000000000..2df98b936 --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv4/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/kustomization.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/kustomization.yaml new file mode 100644 index 000000000..cac42e686 --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base +patches: +- nlb.yaml diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/nlb.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/nlb.yaml new file mode 100644 index 000000000..43f748596 --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/.workshop/manifests/ipv6/nlb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service 
+metadata: + name: ui-nlb + namespace: ui + annotations: + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" \ No newline at end of file diff --git a/manifests/modules/networking/vpc-lattice/.workshop/manifests/nlb.yaml b/manifests/modules/networking/vpc-lattice/.workshop/manifests/nlb.yaml deleted file mode 100644 index e56524651..000000000 --- a/manifests/modules/networking/vpc-lattice/.workshop/manifests/nlb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ui-nlb - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance - namespace: ui -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 8080 - name: http - selector: - app.kubernetes.io/name: ui - app.kubernetes.io/instance: ui - app.kubernetes.io/component: service diff --git a/manifests/modules/networking/vpc-lattice/.workshop/terraform/addon.tf b/manifests/modules/networking/vpc-lattice/.workshop/terraform/addon.tf index 4f9824d72..b25ed72db 100644 --- a/manifests/modules/networking/vpc-lattice/.workshop/terraform/addon.tf +++ b/manifests/modules/networking/vpc-lattice/.workshop/terraform/addon.tf @@ -40,7 +40,8 @@ resource "aws_iam_policy" "lattice" { "vpc-lattice:*", "iam:CreateServiceLinkedRole", "ec2:DescribeVpcs", - "ec2:DescribeSubnets" + "ec2:DescribeSubnets", + "ec2:DescribeTags" ], "Resource": "*" } diff --git a/manifests/modules/networking/vpc-lattice/routes/target-group-policy.yaml b/manifests/modules/networking/vpc-lattice/routes/target-group-policy.yaml new file mode 100644 index 000000000..b84cb9297 --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/routes/target-group-policy.yaml @@ -0,0 +1,47 @@ +apiVersion: application-networking.k8s.aws/v1alpha1 +kind: TargetGroupPolicy +metadata: + name: checkout-policy + namespace: checkout +spec: + targetRef: + group: "" + kind: Service + name: checkout + protocol: HTTP + protocolVersion: HTTP1 + healthCheck: + enabled: true + intervalSeconds: 10 + timeoutSeconds: 1 + healthyThresholdCount: 3 + unhealthyThresholdCount: 2 + path: "/health" + port: 8080 + protocol: HTTP + protocolVersion: HTTP1 + statusMatch: "200" +--- +apiVersion: application-networking.k8s.aws/v1alpha1 +kind: TargetGroupPolicy +metadata: + name: checkout-policy + namespace: checkoutv2 +spec: + targetRef: + group: "" + kind: Service + name: checkout + protocol: HTTP + protocolVersion: HTTP1 + healthCheck: + enabled: true + intervalSeconds: 10 + timeoutSeconds: 1 + healthyThresholdCount: 3 + unhealthyThresholdCount: 2 + path: "/health" + port: 8080 + protocol: HTTP + protocolVersion: HTTP1 + statusMatch: "200" \ No newline at end of file diff --git a/manifests/modules/networking/vpc-lattice/ui/configmap.yaml b/manifests/modules/networking/vpc-lattice/ui/configmap.yaml new file mode 100644 index 000000000..ec6ce5750 --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/ui/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ui + namespace: ui +data: + ENDPOINTS_CHECKOUT: "${CHECKOUT_ROUTE_DNS}" diff --git a/manifests/modules/networking/vpc-lattice/ui/kustomization.yaml b/manifests/modules/networking/vpc-lattice/ui/kustomization.yaml new file mode 100644 index 000000000..e920f762a --- /dev/null +++ b/manifests/modules/networking/vpc-lattice/ui/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: 
Kustomization +bases: +- ../../../../base-application/ui +patches: +- configmap.yaml diff --git a/manifests/modules/observability/kubecost/.workshop/terraform/addon.tf b/manifests/modules/observability/kubecost/.workshop/terraform/addon.tf index 712fbf7f5..a5817c351 100644 --- a/manifests/modules/observability/kubecost/.workshop/terraform/addon.tf +++ b/manifests/modules/observability/kubecost/.workshop/terraform/addon.tf @@ -41,6 +41,8 @@ module "kubecost" { helm_config = { version = "1.102.0" - values = [data.http.kubecost_values.body, templatefile("${path.module}/values.yaml", {})] + values = [data.http.kubecost_values.body, templatefile("${path.module}/values.yaml", { + is_ipv6 = local.is_ipv6 + })] } } diff --git a/manifests/modules/observability/kubecost/.workshop/terraform/values.yaml b/manifests/modules/observability/kubecost/.workshop/terraform/values.yaml index a083a82da..0a709698e 100644 --- a/manifests/modules/observability/kubecost/.workshop/terraform/values.yaml +++ b/manifests/modules/observability/kubecost/.workshop/terraform/values.yaml @@ -2,4 +2,5 @@ service: type: LoadBalancer annotations: service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance \ No newline at end of file + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + %{ if is_ipv6 }service.beta.kubernetes.io/aws-load-balancer-ip-address-type: dualstack%{ endif } \ No newline at end of file diff --git a/manifests/modules/observability/oss-metrics/.workshop/terraform/addon.tf b/manifests/modules/observability/oss-metrics/.workshop/terraform/addon.tf index a28bd8bdd..93b90cdfc 100644 --- a/manifests/modules/observability/oss-metrics/.workshop/terraform/addon.tf +++ b/manifests/modules/observability/oss-metrics/.workshop/terraform/addon.tf @@ -405,6 +405,7 @@ ingress: annotations: alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/target-type: ip + %{ if local.is_ipv6 }alb.ingress.kubernetes.io/ip-address-type: dualstack%{ endif } ingressClassName: alb datasources: diff --git a/manifests/modules/security/Guardduty/mount/privileged-pod-example.yaml b/manifests/modules/security/Guardduty/mount/privileged-pod-example.yaml index 8648a119a..6fb4a9b84 100644 --- a/manifests/modules/security/Guardduty/mount/privileged-pod-example.yaml +++ b/manifests/modules/security/Guardduty/mount/privileged-pod-example.yaml @@ -25,4 +25,4 @@ spec: volumes: - name: test-volume hostPath: - path: /etc + path: /etc \ No newline at end of file diff --git a/manifests/modules/security/Guardduty/privileged/privileged-pod-example.yaml b/manifests/modules/security/Guardduty/privileged/privileged-pod-example.yaml index 7aaad4cc7..bef44d9b3 100644 --- a/manifests/modules/security/Guardduty/privileged/privileged-pod-example.yaml +++ b/manifests/modules/security/Guardduty/privileged/privileged-pod-example.yaml @@ -18,4 +18,4 @@ spec: ports: - containerPort: 22 securityContext: - privileged: true + privileged: true \ No newline at end of file diff --git a/manifests/modules/security/irsa/.workshop/manifests/base/kustomization.yaml b/manifests/modules/security/irsa/.workshop/manifests/base/kustomization.yaml new file mode 100644 index 000000000..b430d0f3a --- /dev/null +++ b/manifests/modules/security/irsa/.workshop/manifests/base/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- nlb.yaml diff --git 
a/manifests/modules/security/irsa/.workshop/manifests/base/nlb.yaml b/manifests/modules/security/irsa/.workshop/manifests/base/nlb.yaml new file mode 100644 index 000000000..91b5a0cad --- /dev/null +++ b/manifests/modules/security/irsa/.workshop/manifests/base/nlb.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + namespace: ui +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 8080 + name: http + selector: + app.kubernetes.io/name: ui + app.kubernetes.io/instance: ui + app.kubernetes.io/component: service diff --git a/manifests/modules/security/irsa/.workshop/manifests/ipv4/kustomization.yaml b/manifests/modules/security/irsa/.workshop/manifests/ipv4/kustomization.yaml new file mode 100644 index 000000000..2df98b936 --- /dev/null +++ b/manifests/modules/security/irsa/.workshop/manifests/ipv4/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base diff --git a/manifests/modules/security/irsa/.workshop/manifests/ipv6/kustomization.yaml b/manifests/modules/security/irsa/.workshop/manifests/ipv6/kustomization.yaml new file mode 100644 index 000000000..cac42e686 --- /dev/null +++ b/manifests/modules/security/irsa/.workshop/manifests/ipv6/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../base +patches: +- nlb.yaml diff --git a/manifests/modules/security/irsa/.workshop/manifests/ipv6/nlb.yaml b/manifests/modules/security/irsa/.workshop/manifests/ipv6/nlb.yaml new file mode 100644 index 000000000..43f748596 --- /dev/null +++ b/manifests/modules/security/irsa/.workshop/manifests/ipv6/nlb.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Service +metadata: + name: ui-nlb + namespace: ui + annotations: + service.beta.kubernetes.io/aws-load-balancer-ip-address-type: "dualstack" \ No newline at end of file diff --git a/manifests/modules/security/irsa/.workshop/manifests/nlb.yaml b/manifests/modules/security/irsa/.workshop/manifests/nlb.yaml deleted file mode 100644 index e56524651..000000000 --- a/manifests/modules/security/irsa/.workshop/manifests/nlb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ui-nlb - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance - namespace: ui -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 8080 - name: http - selector: - app.kubernetes.io/name: ui - app.kubernetes.io/instance: ui - app.kubernetes.io/component: service diff --git a/manifests/modules/security/irsa/dynamo/kustomization.yaml b/manifests/modules/security/irsa/dynamo/kustomization.yaml index c61c2554b..22fe05a39 100644 --- a/manifests/modules/security/irsa/dynamo/kustomization.yaml +++ b/manifests/modules/security/irsa/dynamo/kustomization.yaml @@ -9,5 +9,3 @@ configMapGenerator: behavior: replace options: disableNameSuffixHash: true -resources: -- nlb.yaml diff --git a/manifests/modules/security/irsa/dynamo/nlb.yaml b/manifests/modules/security/irsa/dynamo/nlb.yaml deleted file mode 100644 index e56524651..000000000 --- a/manifests/modules/security/irsa/dynamo/nlb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 
-kind: Service
-metadata:
-  name: ui-nlb
-  annotations:
-    service.beta.kubernetes.io/aws-load-balancer-type: external
-    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
-    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance
-  namespace: ui
-spec:
-  type: LoadBalancer
-  ports:
-  - port: 80
-    targetPort: 8080
-    name: http
-  selector:
-    app.kubernetes.io/name: ui
-    app.kubernetes.io/instance: ui
-    app.kubernetes.io/component: service
diff --git a/test/util/src/cli.ts b/test/util/src/cli.ts
index f510ab183..7e00f4e78 100644
--- a/test/util/src/cli.ts
+++ b/test/util/src/cli.ts
@@ -12,6 +12,7 @@ interface Opts {
   junitReport: string,
   bail: boolean,
   beforeEach: string,
+  skipTags: string,
 }
 
 const testCommand = new Command('test');
@@ -25,9 +26,10 @@ testCommand.argument('<path>', 'file path to Markdown content')
   .option('-j, --junit-report <path>', 'Enables JUnit output format with report at the specified path', '')
   .option('-w, --work-dir <dir>', 'Path to working directory where commands will be executed', '')
   .option('--before-each <command>', 'Command that will be run in each shell before executing a test case', '')
+  .option('--skip-tags <tags>', 'Comma-separated list of tags for which script blocks will not be executed', '')
   .option('-b, --bail', 'Bail after the first test failure')
   .action(async (path, options: Opts) => {
-    let markdownSh = new MarkdownSh(options.glob, options.debug)
+    let markdownSh = new MarkdownSh(options.glob, options.debug, options.skipTags)
     await markdownSh.test(path, options.dryRun, options.timeout, options.hookTimeout, options.bail, options.junitReport, options.beforeEach)
   })
 
@@ -36,7 +38,7 @@
 const planCommand = new Command('plan')
   .argument('<path>', 'file path to Markdown content')
   .option('-g, --glob <glob>', 'Glob for tests to include ex. content/chapter1/*', '')
   .action(async (path, options: Opts) => {
-    let markdownSh = new MarkdownSh(options.glob, options.debug)
+    let markdownSh = new MarkdownSh(options.glob, options.debug, options.skipTags)
     await markdownSh.plan(path)
   });
diff --git a/test/util/src/lib/gatherer/gatherer.ts b/test/util/src/lib/gatherer/gatherer.ts
index a7807f82c..925d4007a 100644
--- a/test/util/src/lib/gatherer/gatherer.ts
+++ b/test/util/src/lib/gatherer/gatherer.ts
@@ -26,7 +26,7 @@ export class Page {
 
 export class Script {
   constructor(public command: string, public wait: number, public timeout: number,
-    public hook: string | null, public hookTimeout: number, public expectError: boolean, public lineNumber: number | undefined) {
+    public hook: string | null, public hookTimeout: number, public expectError: boolean, public lineNumber: number | undefined, public tags: string[]) {
 
   }
 }
@@ -41,6 +41,7 @@
   static TEST_KEY: string = 'test';
   static EXPECT_ERROR_KEY: string = 'expectError';
   static RAW_KEY: string = 'raw';
+  static TAGS_KEY: string = 'tags';
 
   static INDEX_PAGES : Array<string> = ['_index.md', 'index.en.md', 'index.md']
 
@@ -49,6 +50,8 @@
     .use(remarkGfm)
     .use(remarkFrontmatter);
 
+  constructor(public skipTags: string[]) {}
+
   public async gather(directory: string): Promise<Category> {
     if(!fs.existsSync(directory)) {
       throw new Error(`Directory '${directory}' not found`)
@@ -171,6 +174,7 @@
     let hookTimeout = 0
     let expectError = false
     let raw = false;
+    let tags : string[] = [];
 
     if(meta) {
       // TODO: Change this to regex https://regex101.com/r/uB4sI9/1
@@ -206,6 +210,9 @@
           case Gatherer.HOOK_TIMEOUT_KEY:
             hookTimeout = parseInt(value)
             break;
+          case Gatherer.TAGS_KEY:
+            tags = value.split(',')
+            break;
           default:
             console.log(`Warning: Unrecognized param ${key} in code block`);
         }
       });
     }
 
+      const skipTagsIntersection = tags.filter(value => this.skipTags.includes(value));
+      const skip = skipTagsIntersection.length > 0;
 
-    if(add) {
+    if(add && !skip) {
       let command = this.extractCommand(child.value, raw)
 
       if(command.length > 0) {
-        data.push(new Script(command, wait, timeout, hook, hookTimeout, expectError, child.position?.start.line));
+        data.push(new Script(command, wait, timeout, hook, hookTimeout, expectError, child.position?.start.line, tags));
       }
     }
   }
diff --git a/test/util/src/lib/markdownsh.ts b/test/util/src/lib/markdownsh.ts
index c2de51894..0c65ad950 100644
--- a/test/util/src/lib/markdownsh.ts
+++ b/test/util/src/lib/markdownsh.ts
@@ -7,10 +7,12 @@ import { DefaultShell, Shell, ShellError, ShellTimeout } from "./shell/shell.js"
 import fs from 'fs'
 
 export class MarkdownSh {
-  private gatherer = new Gatherer();
+  private gatherer : Gatherer;
 
   constructor(private glob: string,
-    private debug: boolean) {
+    private debug: boolean,
+    private skipTags: string) {
+    this.gatherer = new Gatherer((skipTags ?? '').split(','));
   }
 
   async plan(directory: string) {
diff --git a/test/util/tests/fixtures/basic/tags/index.md b/test/util/tests/fixtures/basic/tags/index.md
new file mode 100644
index 000000000..579d69d03
--- /dev/null
+++ b/test/util/tests/fixtures/basic/tags/index.md
@@ -0,0 +1,12 @@
+---
+title: "Tags"
+weight: 20
+---
+
+```bash tags=tag1
+$ dummy
+```
+
+```bash tags=tag2
+$ dummy1
+```
\ No newline at end of file
diff --git a/test/util/tests/gatherer.test.ts b/test/util/tests/gatherer.test.ts
index b8b99b309..850c5a6cb 100644
--- a/test/util/tests/gatherer.test.ts
+++ b/test/util/tests/gatherer.test.ts
@@ -13,7 +13,7 @@
   });
 
   beforeEach(() => {
-    gatherer = new Gatherer()
+    gatherer = new Gatherer(["tag1"])
   });
 
   context('when loading basic content', () => {
@@ -118,6 +118,12 @@
 
       expect(emptyBlockPage?.scripts.length).to.equal(1)
     });
+
+    it('should ignore blocks with skipped tag', async () => {
+      const tagPage = result?.children[2].pages[0]
+
+      expect(tagPage?.scripts.length).to.equal(1)
+    });
   })
 
   after(() => {
diff --git a/website/docs/automation/controlplanes/ack/configure-application.md b/website/docs/automation/controlplanes/ack/configure-application.md
index fdc26dade..214b0a8ea 100644
--- a/website/docs/automation/controlplanes/ack/configure-application.md
+++ b/website/docs/automation/controlplanes/ack/configure-application.md
@@ -58,7 +58,7 @@ k8s-ui-uinlb-a9797f0f61.elb.us-west-2.amazonaws.com
 To wait until the load balancer has finished provisioning you can run this command:
 
 ```bash timeout=610
-$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")
+$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home
 ```
 
 Once the load balancer is provisioned you can access it by pasting the URL in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user.
diff --git a/website/docs/fundamentals/exposing/ingress/adding-ingress.md b/website/docs/fundamentals/exposing/ingress/adding-ingress.md
index ba450d225..e8a08aa88 100644
--- a/website/docs/fundamentals/exposing/ingress/adding-ingress.md
+++ b/website/docs/fundamentals/exposing/ingress/adding-ingress.md
@@ -5,16 +5,36 @@
 Let's create an Ingress resource with the following manifest:
 
+<Tabs>
+<TabItem value="ipv4" label="IPv4" default>
+
 ```file
-manifests/modules/exposing/ingress/creating-ingress/ingress.yaml
+manifests/modules/exposing/ingress/creating-ingress/ipv4/ingress.yaml
 ```
 
 This will cause the AWS Load Balancer Controller to provision an Application Load Balancer and configure it to route traffic to the Pods for the `ui` application.
 
-```bash timeout=180 hook=add-ingress hookTimeout=430
-$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/creating-ingress
+```bash timeout=180 hook=add-ingress hookTimeout=430 tags=ipv4
+$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/creating-ingress/ipv4
+```
+
+</TabItem>
+<TabItem value="ipv6" label="IPv6">
+
+```file
+manifests/modules/exposing/ingress/creating-ingress/ipv6/ingress.yaml
+```
+
+This will cause the AWS Load Balancer Controller to provision an Application Load Balancer with both "A" and "AAAA" records and configure it to route traffic to the Pods for the `ui` application. The `alb.ingress.kubernetes.io/ip-address-type: dualstack` annotation is key to enabling IPv6 support.
+
+```bash timeout=180 hook=add-ingress hookTimeout=430 tags=ipv6
+$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/creating-ingress/ipv6
 ```
 
+</TabItem>
+</Tabs>
+
+## Verify Ingress resource
+
 Let's inspect the Ingress object created:
 
 ```bash
@@ -110,7 +130,7 @@ k8s-ui-uinlb-a9797f0f61.elb.us-west-2.amazonaws.com
 To wait until the load balancer has finished provisioning you can run this command:
 
 ```bash
-$ wait-for-lb $(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")
+$ wait-for-lb $(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home
 ```
 
 And access it in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user.
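Beyond the `wait-for-lb` check, the dual-stack behavior of the IPv6 path can be confirmed from the CLI. A sketch, assuming the Ingress from the IPv6 tab above has been applied; the output line is illustrative:

```bash
# Find the ALB backing the ui Ingress by its DNS name and print its IP
# address type; with the dualstack annotation in place this reports "dualstack"
$ ALB_HOST=$(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}")
$ aws elbv2 describe-load-balancers \
    --query "LoadBalancers[?DNSName=='$ALB_HOST'].IpAddressType" --output text
dualstack
```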
diff --git a/website/docs/fundamentals/exposing/ingress/multiple-ingress.md b/website/docs/fundamentals/exposing/ingress/multiple-ingress.md
index 8af9befc1..5ae3461b9 100644
--- a/website/docs/fundamentals/exposing/ingress/multiple-ingress.md
+++ b/website/docs/fundamentals/exposing/ingress/multiple-ingress.md
@@ -14,24 +14,65 @@ $ curl $ADDRESS/catalogue
 The first thing we'll do is re-create the Ingress for `ui` component adding the annotation `alb.ingress.kubernetes.io/group.name`:
 
+<Tabs>
+<TabItem value="ipv4" label="IPv4" default>
+
+```file
+manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-ui.yaml
+```
+
+</TabItem>
+<TabItem value="ipv6" label="IPv6">
+
 ```file
-manifests/modules/exposing/ingress/multiple-ingress/ingress-ui.yaml
+manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-ui.yaml
 ```
 
+</TabItem>
+</Tabs>
+
 Now, let's create a separate Ingress for the `catalog` component that also leverages the same `group.name`:
 
+<Tabs>
+<TabItem value="ipv4" label="IPv4" default>
+
 ```file
-manifests/modules/exposing/ingress/multiple-ingress/ingress-catalog.yaml
+manifests/modules/exposing/ingress/multiple-ingress/ipv4/ingress-catalog.yaml
 ```
 
-This ingress is also configuring rules to route requests prefixed with `/catalogue` to the `catalog` component.
+</TabItem>
+<TabItem value="ipv6" label="IPv6">
+
+```file
+manifests/modules/exposing/ingress/multiple-ingress/ipv6/ingress-catalog.yaml
+```
+
+</TabItem>
+</Tabs>
+
+Let's apply these manifests:
+
+<Tabs>
+<TabItem value="ipv4" label="IPv4" default>
+
+```bash timeout=180 hook=add-ingress hookTimeout=430 tags=ipv4
+$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/multiple-ingress/ipv4
+```
 
-Apply these manifests to the cluster:
+</TabItem>
+<TabItem value="ipv6" label="IPv6">
 
-```bash timeout=180 hook=add-ingress hookTimeout=430
-$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/multiple-ingress
+```bash timeout=180 hook=add-ingress hookTimeout=430 tags=ipv6
+$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/multiple-ingress/ipv6
 ```
+
+</TabItem>
+</Tabs>
+
+The `catalog` Ingress also configures rules to route requests prefixed with `/catalogue` to the `catalog` component.
+
+## Verify Ingress resource
+
 We'll now have two separate Ingress objects in our cluster:
 
 ```bash
@@ -64,7 +105,7 @@ https://console.aws.amazon.com/ec2/home#LoadBalancers:tag:ingress.k8s.aws/stack=
 To wait until the load balancer has finished provisioning you can run this command:
 
 ```bash
-$ wait-for-lb $(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")
+$ wait-for-lb $(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home
 ```
 
 Try accessing the new Ingress URL in the browser as before to check the web UI still works:
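Because both Ingresses carry the same `alb.ingress.kubernetes.io/group.name`, the controller merges them into a single ALB. A quick way to exercise both routing rules, reusing commands the page already relies on; the status codes shown are illustrative:

```bash
# The ui rule serves /home and the catalog rule serves /catalogue,
# both through the same shared load balancer
$ ADDRESS=$(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}")
$ curl -s -o /dev/null -w "%{http_code}\n" $ADDRESS/home
200
$ curl -s -o /dev/null -w "%{http_code}\n" $ADDRESS/catalogue
200
```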
diff --git a/website/docs/fundamentals/exposing/ingress/tests/hook-add-ingress.sh b/website/docs/fundamentals/exposing/ingress/tests/hook-add-ingress.sh
index e573766a3..3b515324c 100644
--- a/website/docs/fundamentals/exposing/ingress/tests/hook-add-ingress.sh
+++ b/website/docs/fundamentals/exposing/ingress/tests/hook-add-ingress.sh
@@ -14,19 +14,7 @@ after() {
     exit 1
   fi
 
-  EXIT_CODE=0
-
-  timeout -s TERM 400 bash -c \
-    'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\
-    do sleep 20;\
-    done' || EXIT_CODE=$?
-
-  echo "Timeout completed"
-
-  if [ $EXIT_CODE -ne 0 ]; then
-    >&2 echo "Ingress did not become available after 400 seconds"
-    exit 1
-  fi
+  wait-for-lb ${ui_endpoint}/home
 }
 
 "$@"
diff --git a/website/docs/fundamentals/exposing/loadbalancer/adding-lb.md b/website/docs/fundamentals/exposing/loadbalancer/adding-lb.md
index 7d3c1c87b..4dc3cd983 100644
--- a/website/docs/fundamentals/exposing/loadbalancer/adding-lb.md
+++ b/website/docs/fundamentals/exposing/loadbalancer/adding-lb.md
@@ -3,21 +3,25 @@ title: "Creating the load balancer"
 sidebar_position: 20
 ---
 
+:::note
+As of today, NLB doesn't support "instance mode" for IPv6 targets. You can skip ahead to the "IP mode" section if you are running this workshop on an IPv6 cluster.
+:::
+
 Let's create an additional Service that provisions a load balancer with the following kustomization:
 
 ```file
-manifests/modules/exposing/load-balancer/nlb/nlb.yaml
+manifests/modules/exposing/load-balancer/ipv4/nlb/nlb.yaml
 ```
 
 This `Service` will create a Network Load Balancer that listens on port 80 and forwards connections to the `ui` Pods on port 8080. An NLB is a layer 4 load balancer that in our case operates at the TCP layer.
 
-```bash timeout=180 hook=add-lb hookTimeout=430
-$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/load-balancer/nlb
+```bash timeout=180 hook=add-lb hookTimeout=430 tags=ipv4
+$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/load-balancer/ipv4/nlb
 ```
 
 Let's inspect the Service resources for the `ui` application again:
 
-```bash
+```bash tags=ipv4
 $ kubectl get service -n ui
 ```
 
@@ -27,7 +31,7 @@ The NLB will take several minutes to provision and register its targets so take
 
 First, take a look at the load balancer itself:
 
-```bash
+```bash tags=ipv4
 $ aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uinlb`) == `true`]'
 [
     {
@@ -71,7 +75,7 @@ What does this tell us?
 
 We can also inspect the targets in the target group that was created by the controller:
 
-```bash
+```bash tags=ipv4
 $ ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uinlb`) == `true`].LoadBalancerArn' | jq -r '.[0]')
 $ TARGET_GROUP_ARN=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn')
 $ aws elbv2 describe-target-health --target-group-arn $TARGET_GROUP_ARN
@@ -119,15 +123,15 @@ https://console.aws.amazon.com/ec2/home#LoadBalancers:tag:service.k8s.aws/stack=
 Get the URL from the Service resource:
 
-```bash
+```bash tags=ipv4
 $ kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}"
 k8s-ui-uinlb-a9797f0f61.elb.us-west-2.amazonaws.com
 ```
 
 To wait until the load balancer has finished provisioning you can run this command:
 
-```bash
-$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")
+```bash tags=ipv4
+$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home
 ```
 
 Now that our application is exposed to the outside world, let's try to access it by pasting that URL in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user.
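The note above can be made concrete with DNS: the IPv4 NLB publishes only "A" records, while the dual-stack NLB used on the IPv6 path also publishes "AAAA" records. A sketch, assuming `dig` is available in the lab environment:

```bash
# An IPv4-only NLB answers the A query but returns nothing for AAAA;
# a dualstack NLB answers both
$ NLB_HOST=$(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}")
$ dig +short A $NLB_HOST
$ dig +short AAAA $NLB_HOST
```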
We can also inspect the targets in the target group that was created by the controller: -```bash +```bash tags=ipv4 $ ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-uinlb`) == `true`].LoadBalancerArn' | jq -r '.[0]') $ TARGET_GROUP_ARN=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn') $ aws elbv2 describe-target-health --target-group-arn $TARGET_GROUP_ARN @@ -119,15 +123,15 @@ https://console.aws.amazon.com/ec2/home#LoadBalancers:tag:service.k8s.aws/stack= Get the URL from the Service resource: -```bash +```bash tags=ipv4 $ kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}" k8s-ui-uinlb-a9797f0f61.elb.us-west-2.amazonaws.com ``` To wait until the load balancer has finished provisioning you can run this command: -```bash -$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}") +```bash tags=ipv4 +$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home ``` Now that our application is exposed to the outside world, let's try to access it by pasting that URL in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user. diff --git a/website/docs/fundamentals/exposing/loadbalancer/ip-mode.md b/website/docs/fundamentals/exposing/loadbalancer/ip-mode.md index b48eea836..d39276d10 100644 --- a/website/docs/fundamentals/exposing/loadbalancer/ip-mode.md +++ b/website/docs/fundamentals/exposing/loadbalancer/ip-mode.md @@ -21,17 +21,41 @@ Let's reconfigure our NLB to use IP mode and look at the effect it has on the in This is the patch we'll be applying to re-configure the Service: + + + ```kustomization -modules/exposing/load-balancer/ip-mode/nlb.yaml +modules/exposing/load-balancer/ipv4/ip-mode/nlb.yaml Service/ui-nlb ``` Apply the manifest with kustomize: -```bash -$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/load-balancer/ip-mode +```bash tags=ipv4 +$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/load-balancer/ipv4/ip-mode +``` + + + + +```kustomization +modules/exposing/load-balancer/ipv6/ip-mode/nlb.yaml +Service/ui-nlb ``` +Currently, NLB only supports "IP mode" for IPv6 targets. The AWS Load Balancer Controller supports creating dual-stack NLBs on IPv6 clusters, for Pods running on both AWS EC2 instances and AWS Fargate. This Service will create a dual-stack Network Load Balancer with both "A" and "AAAA" records that listens on port 80 and forwards connections to the IPv6 addresses of the ui Pods on port 8080. + +Apply the manifest with kustomize: + +```bash tags=ipv6 +$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/load-balancer/ipv6/ip-mode +``` + + + + +## Verify the service + It will take a few minutes for the configuration of the load balancer to be updated. Run the following command to ensure the annotation is updated: ```bash @@ -129,5 +153,5 @@ As expected we now have 3 targets, matching the number of replicas in the ui Dep If you want to wait to make sure the application still functions the same, run the following command. Otherwise you can proceed to the next module.
```bash timeout=240 -$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}") +$ wait-for-lb $(kubectl get service -n ui ui-nlb -o jsonpath="{.status.loadBalancer.ingress[*].hostname}{'\n'}")/home ``` diff --git a/website/docs/fundamentals/exposing/loadbalancer/tests/hook-add-lb.sh b/website/docs/fundamentals/exposing/loadbalancer/tests/hook-add-lb.sh index c8253d381..37924f4ac 100644 --- a/website/docs/fundamentals/exposing/loadbalancer/tests/hook-add-lb.sh +++ b/website/docs/fundamentals/exposing/loadbalancer/tests/hook-add-lb.sh @@ -14,19 +14,7 @@ after() { exit 1 fi - EXIT_CODE=0 - - timeout -s TERM 400 bash -c \ - 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\ - do sleep 20;\ - done' || EXIT_CODE=$? - - echo "Timeout completed" - - if [ $EXIT_CODE -ne 0 ]; then - >&2 echo "Load balancer did not become available after 400 seconds" - exit 1 - fi + wait-for-lb ${ui_endpoint}/home } "$@" diff --git a/website/docs/introduction/index.mdx b/website/docs/introduction/index.mdx index 27a26ebff..d1cdf8ba1 100644 --- a/website/docs/introduction/index.mdx +++ b/website/docs/introduction/index.mdx @@ -7,7 +7,7 @@ import ReactPlayer from 'react-player' Welcome to the **AWS Elastic Kubernetes Service (EKS) workshop**! -This workshop guides you through a set of hands-on lab exercises to learn and explore the various features provided by EKS and how it integrates with the broader set of services offered by AWS. The labs are grouped across a number of areas: +This workshop guides you through a set of hands-on lab exercises to learn and explore the various features provided by EKS and how it integrates with the broader set of services offered by AWS. The workshop supports both Amazon EKS IPv4 and IPv6 clusters. The labs are grouped across a number of areas: * **Introduction** - Learn the format and structure of this workshop * **Fundamentals** - Familiarize yourself with basic EKS concepts such as managed node groups, Fargate, exposing your applications and utilizing storage diff --git a/website/docs/introduction/setup/your-account/using-eksctl.md b/website/docs/introduction/setup/your-account/using-eksctl.md index 182b4ad53..459ac2729 100644 --- a/website/docs/introduction/setup/your-account/using-eksctl.md +++ b/website/docs/introduction/setup/your-account/using-eksctl.md @@ -5,27 +5,56 @@ sidebar_position: 20 This section outlines how to build a cluster for the lab exercises using the [eksctl tool](https://eksctl.io/). This is the easiest way to get started, and is recommended for most learners. -The `eksctl` utility has been pre-installed in Cloud9 so we can immediately create the cluster. This is the configuration that will be used to build the cluster: +The `eksctl` utility has been pre-installed in Cloud9 so we can immediately create the cluster. + +:::tip + +You can choose to do the labs using either IPv4 or IPv6 VPC networking. IPv4 is recommended unless you specifically need IPv6, since some labs do not function on IPv6 clusters. Select the appropriate tab below. + +Once you have built a cluster with a particular network family you will need to recreate the cluster if you wish to switch.
+ +::: + + + + +This is the configuration that will be used to build a cluster that uses IPv4 networking: ```file hidePath=true -manifests/../cluster/eksctl/cluster.yaml +manifests/../cluster/eksctl/ipv4/cluster.yaml ``` -Based on this configuration `eksctl` will: -- Create a VPC across three availability zones -- Create an EKS cluster -- Create an IAM OIDC provider -- Add a managed node group named `default` -- Configure the VPC CNI to use prefix delegation +```bash test=false +$ export EKS_CLUSTER_NAME=eks-workshop +$ curl -fsSL https://raw.githubusercontent.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/VAR::MANIFESTS_REF/cluster/eksctl/ipv4/cluster.yaml | \ +envsubst | eksctl create cluster -f - +``` + + + -Apply the configuration file like so: +This is the configuration that will be used to build a cluster that uses IPv6 networking: + +```file hidePath=true +manifests/../cluster/eksctl/ipv6/cluster.yaml +``` ```bash test=false $ export EKS_CLUSTER_NAME=eks-workshop -$ curl -fsSL https://raw.githubusercontent.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/VAR::MANIFESTS_REF/cluster/eksctl/cluster.yaml | \ +$ curl -fsSL https://raw.githubusercontent.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/VAR::MANIFESTS_REF/cluster/eksctl/ipv6/cluster.yaml | \ envsubst | eksctl create cluster -f - ``` + + + +Based on this configuration `eksctl` will: +- Create a VPC across three availability zones +- Create an EKS cluster +- Create an IAM OIDC provider +- Add a managed node group named `default` +- Configure the VPC CNI to use prefix delegation (only in IPv4 mode) + This generally takes 20 minutes. Once the cluster is created run this command to use the cluster for the lab exercises: ```bash test=false @@ -48,4 +77,4 @@ Next delete the cluster with `eksctl`: ```bash test=false $ eksctl delete cluster $EKS_CLUSTER_NAME --wait -``` \ No newline at end of file +``` diff --git a/website/docs/networking/custom-networking/configure-vpc-cni.md b/website/docs/networking/custom-networking/configure-vpc-cni.md index 1414f867b..f4bdd3910 100644 --- a/website/docs/networking/custom-networking/configure-vpc-cni.md +++ b/website/docs/networking/custom-networking/configure-vpc-cni.md @@ -5,7 +5,7 @@ sidebar_position: 10 We'll start by configuring the Amazon VPC CNI. Our VPC has been reconfigured with the addition of a secondary CIDR with the range `100.64.0.0/16`: -```bash wait=30 +```bash tags=ipv4 wait=30 $ aws ec2 describe-vpcs --vpc-ids $VPC_ID | jq '.Vpcs[0].CidrBlockAssociationSet' [ { @@ -27,7 +27,7 @@ $ aws ec2 describe-vpcs --vpc-ids $VPC_ID | jq '.Vpcs[0].CidrBlockAssociationSet This means that we now have a separate CIDR range we can use in addition to the default CIDR range, which in the above output is `10.42.0.0/16`. From this new CIDR range we have added 3 new subnets to the VPC which will be used for running our pods: -```bash wait=30 +```bash tags=ipv4 wait=30 $ echo "The secondary subnet in AZ $SUBNET_AZ_1 is $SECONDARY_SUBNET_1" $ echo "The secondary subnet in AZ $SUBNET_AZ_2 is $SECONDARY_SUBNET_2" $ echo "The secondary subnet in AZ $SUBNET_AZ_3 is $SECONDARY_SUBNET_3" @@ -35,7 +35,7 @@ $ echo "The secondary subnet in AZ $SUBNET_AZ_3 is $SECONDARY_SUBNET_3" To enable custom networking we have to set the `AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG` environment variable to *true* in the aws-node DaemonSet. 
-```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true ``` @@ -47,18 +47,18 @@ manifests/modules/networking/custom-networking/provision/eniconfigs.yaml Let's apply these to our cluster: -```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl apply -k ~/environment/eks-workshop/modules/networking/custom-networking/provision ``` Confirm that the `ENIConfig` objects were created: -```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl get ENIConfigs ``` Finally we'll update the aws-node DaemonSet to automatically apply the `ENIConfig` for an Availability Zone to any new Amazon EC2 nodes created in the EKS cluster. -```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl set env daemonset aws-node -n kube-system ENI_CONFIG_LABEL_DEF=topology.kubernetes.io/zone ``` diff --git a/website/docs/networking/custom-networking/deploy-sample-application.md b/website/docs/networking/custom-networking/deploy-sample-application.md index d39648ad9..d7b63a7d1 100644 --- a/website/docs/networking/custom-networking/deploy-sample-application.md +++ b/website/docs/networking/custom-networking/deploy-sample-application.md @@ -7,7 +7,7 @@ In order to test the custom networking updates we have made so far, let's update To make the change, run the following command to modify the `checkout` deployment in your cluster -```bash wait=30 timeout=240 +```bash tags=ipv4 wait=30 timeout=240 $ kubectl apply -k ~/environment/eks-workshop/modules/networking/custom-networking/sampleapp $ kubectl rollout status deployment/checkout -n checkout --timeout 180s ``` @@ -21,7 +21,7 @@ Deployment/checkout Let's review the microservices deployed in the “checkout” namespace. -```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl get pods -n checkout -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES checkout-5fbbc99bb7-brn2m 1/1 Running 0 98s 100.64.10.16 ip-10-42-10-14.us-west-2.compute.internal diff --git a/website/docs/networking/custom-networking/index.md b/website/docs/networking/custom-networking/index.md index b9194e506..a682a89fa 100644 --- a/website/docs/networking/custom-networking/index.md +++ b/website/docs/networking/custom-networking/index.md @@ -7,7 +7,7 @@ sidebar_custom_props: {"module": true} :::tip Before you start Prepare your environment for this section: -```bash wait=30 timeout=300 +```bash tags=ipv4 wait=30 timeout=300 $ prepare-environment networking/custom-networking ``` diff --git a/website/docs/networking/custom-networking/provision-new-node-group.md b/website/docs/networking/custom-networking/provision-new-node-group.md index 28c99513e..709044bf5 100644 --- a/website/docs/networking/custom-networking/provision-new-node-group.md +++ b/website/docs/networking/custom-networking/provision-new-node-group.md @@ -5,7 +5,7 @@ sidebar_position: 20 Create an EKS managed node group: -```bash wait=30 +```bash tags=ipv4 wait=30 $ aws eks create-nodegroup --region $AWS_REGION \ --cluster-name $EKS_CLUSTER_NAME \ --nodegroup-name custom-networking \ @@ -17,13 +17,13 @@ $ aws eks create-nodegroup --region $AWS_REGION \ Node group creation takes several minutes.
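Looking back at the `ENIConfig` objects applied earlier in this section, they generally take the shape sketched below — one per availability zone, pointing Pod ENIs at a secondary subnet. The subnet and security group IDs are illustrative assumptions; the real objects come from the `eniconfigs.yaml` manifest referenced above.

```yaml
# Illustrative sketch only — the real objects come from eniconfigs.yaml
apiVersion: crd.k8s.amazonaws.com/v1alpha1
kind: ENIConfig
metadata:
  # Named after the AZ so ENI_CONFIG_LABEL_DEF=topology.kubernetes.io/zone can select it
  name: us-west-2a
spec:
  subnet: subnet-0123456789abcdef0 # secondary-CIDR subnet for Pod ENIs (assumed ID)
  securityGroups:
    - sg-0123456789abcdef0 # security group attached to Pod ENIs (assumed ID)
```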
You can wait for the node group creation to complete using this command: -```bash wait=30 timeout=300 +```bash tags=ipv4 wait=30 timeout=300 $ aws eks wait nodegroup-active --cluster-name $EKS_CLUSTER_NAME --nodegroup-name custom-networking ``` Once this is complete we can see the new nodes registered in the EKS cluster: -```bash wait=30 +```bash tags=ipv4 wait=30 $ kubectl get nodes -L eks.amazonaws.com/nodegroup NAME STATUS ROLES AGE VERSION NODEGROUP ip-10-42-104-242.us-west-2.compute.internal Ready 84m v1.25.6-eks-48e63af default diff --git a/website/docs/networking/custom-networking/vpc.md b/website/docs/networking/custom-networking/vpc.md index 2323a84f4..a0a92f9e8 100644 --- a/website/docs/networking/custom-networking/vpc.md +++ b/website/docs/networking/custom-networking/vpc.md @@ -5,7 +5,7 @@ sidebar_position: 5 We can start by inspecting the VPC that has been set up. For example describe the VPC: -```bash wait=30 +```bash tags=ipv4 wait=30 $ aws ec2 describe-vpcs --vpc-ids $VPC_ID { "Vpcs": [ @@ -63,7 +63,7 @@ https://console.aws.amazon.com/vpc/home#vpcs:tag:created-by=eks-workshop-v2 Describing the subnets associated with the VPC will show 9 subnets: -```bash wait=30 +```bash tags=ipv4 wait=30 $ aws ec2 describe-subnets --filters "Name=tag:created-by,Values=eks-workshop-v2" \ --query "Subnets[*].CidrBlock" [ diff --git a/website/docs/networking/index.mdx b/website/docs/networking/index.mdx index 48532e096..d75aafb70 100644 --- a/website/docs/networking/index.mdx +++ b/website/docs/networking/index.mdx @@ -5,6 +5,10 @@ chapter: true weight: 20 --- +:::note +You may skip this lab if you are running an IPv6 cluster. The primary goal of Amazon EKS IPv6 support is to solve IPv4 address exhaustion, so custom networking doesn't apply to IPv6 clusters. By default, the Amazon VPC CNI is deployed in prefix mode on IPv6 clusters and assigns a /80 IPv6 prefix to each node at startup. The CNI supports both IPv4 and IPv6 egress from within Pods. Security groups for Pods are not yet supported on IPv6 clusters. +::: + import ReactPlayer from 'react-player' It's essential to understand Kubernetes networking to operate your cluster and applications efficiently. Pod networking, also called cluster networking, is the center of Kubernetes networking. Kubernetes supports Container Network Interface (CNI) plugins for cluster networking. diff --git a/website/docs/networking/prefix/configure.md b/website/docs/networking/prefix/configure.md index 145b654db..befc2a08c 100644 --- a/website/docs/networking/prefix/configure.md +++ b/website/docs/networking/prefix/configure.md @@ -5,7 +5,7 @@ sidebar_position: 30 Before we begin, let's confirm that the VPC CNI is installed and running. -```bash +```bash tags=ipv4 $ kubectl get pods --selector=k8s-app=aws-node -n kube-system NAME READY STATUS RESTARTS AGE aws-node-btst2 1/1 Running 0 107m @@ -15,7 +15,7 @@ aws-node-zd5rg 1/1 Running 0 107m Confirm the CNI version. The CNI version must be 1.9.0 or later. -```bash +```bash tags=ipv4 $ kubectl describe daemonset aws-node --namespace kube-system | grep Image | cut -d "/" -f 2 amazon-k8s-cni-init:v1.12.0-eksbuild.1 amazon-k8s-cni:v1.12.0-eksbuild.1 @@ -25,7 +25,7 @@ You will see similar output to above. Confirm if the VPC CNI is configured to run in prefix mode. The `ENABLE_PREFIX_DELEGATION` value should be set to "true": -```bash +```bash tags=ipv4 $ kubectl get ds aws-node -o yaml -n kube-system | yq '.spec.template.spec.containers[].env' [...]
- name: ENABLE_PREFIX_DELEGATION @@ -35,7 +35,7 @@ $ kubectl get ds aws-node -o yaml -n kube-system | yq '.spec.template.spec.conta Since prefix delegation is enabled (this was done at cluster creation for this workshop), we should be able to see prefixes assigned to the network interfaces of the worker nodes. You should see output similar to below. -```bash +```bash tags=ipv4 $ aws ec2 describe-instances --filters "Name=tag-key,Values=eks:cluster-name" \ "Name=tag-value,Values=${EKS_CLUSTER_NAME}" \ --query 'Reservations[*].Instances[].{InstanceId: InstanceId, Prefixes: NetworkInterfaces[].Ipv4Prefixes[]}' diff --git a/website/docs/networking/prefix/consume.md b/website/docs/networking/prefix/consume.md index b523930b4..d4ad93dbd 100644 --- a/website/docs/networking/prefix/consume.md +++ b/website/docs/networking/prefix/consume.md @@ -11,7 +11,7 @@ manifests/modules/networking/prefix/deployment-pause.yaml This will spin up `150 pods` and may take some time: -```bash +```bash tags=ipv4 $ kubectl apply -k ~/environment/eks-workshop/modules/networking/prefix deployment.apps/pause-pods-prefix created $ kubectl wait --for=condition=available --timeout=60s deployment/pause-pods-prefix -n other @@ -19,7 +19,7 @@ $ kubectl wait --for=condition=available --timeout=60s deployment/pause-pods-pre Check the pause pods are in a running state: -```bash +```bash tags=ipv4 $ kubectl get deployment -n other NAME READY UP-TO-DATE AVAILABLE AGE pause-pods-prefix 150/150 150 150 101s @@ -27,7 +27,7 @@ pause-pods-prefix 150/150 150 150 101s Once the pods are running successfully, we should be able to see the additional prefixes added to the worker nodes. -```bash +```bash tags=ipv4 $ aws ec2 describe-instances --filters "Name=tag-key,Values=eks:cluster-name" "Name=tag-value,Values=${EKS_CLUSTER_NAME}" \ --query 'Reservations[*].Instances[].{InstanceId: InstanceId, Prefixes: NetworkInterfaces[].Ipv4Prefixes[]}' ``` diff --git a/website/docs/networking/prefix/index.md b/website/docs/networking/prefix/index.md index c5322429b..9dc0a702f 100644 --- a/website/docs/networking/prefix/index.md +++ b/website/docs/networking/prefix/index.md @@ -7,7 +7,7 @@ sidebar_custom_props: {"module": true} :::tip Before you start Prepare your environment for this section: -```bash timeout=300 wait=30 +```bash tags=ipv4 timeout=300 wait=30 $ prepare-environment networking/prefix ``` @@ -26,3 +26,14 @@ As more Pods scheduled additional prefixes will be requested for the existing EN ![prefix-flow](prefix_flow.jpeg) Please visit [EKS best practices guide](https://aws.github.io/aws-eks-best-practices/networking/prefix-mode/) for the list of recommendations for using VPC CNI in prefix mode.
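As a quick recap of the configuration this module relies on, prefix mode is controlled by environment variables on the `aws-node` DaemonSet. A sketch of the relevant fragment is below; `ENABLE_PREFIX_DELEGATION` is the setting verified earlier, while `WARM_PREFIX_TARGET` is shown as a commonly tuned knob and its value here is an assumption (it defaults to 1).

```yaml
# Fragment of the aws-node container env — illustrative, not a complete manifest
env:
  - name: ENABLE_PREFIX_DELEGATION
    value: "true" # assign /28 IPv4 prefixes to ENIs instead of individual IPs
  - name: WARM_PREFIX_TARGET
    value: "1" # keep one spare prefix attached for faster Pod startup
```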
\ No newline at end of file diff --git a/website/docs/networking/security-groups-for-pods/add-sg.md b/website/docs/networking/security-groups-for-pods/add-sg.md index c92a41cc7..e248a50ac 100644 --- a/website/docs/networking/security-groups-for-pods/add-sg.md +++ b/website/docs/networking/security-groups-for-pods/add-sg.md @@ -8,7 +8,7 @@ In order for our catalog Pod to successfully connect to the RDS instance we'll n A security group which allows access to the RDS database has already been set up for you, and we can view it like so: -```bash +```bash tags=ipv4 $ export CATALOG_SG_ID=$(aws ec2 describe-security-groups \ --filters Name=vpc-id,Values=$VPC_ID Name=group-name,Values=$EKS_CLUSTER_NAME-catalog \ --query "SecurityGroups[0].GroupId" --output text) @@ -72,7 +72,7 @@ SecurityGroupPolicy/catalog-rds-access Apply this to the cluster then recycle the catalog Pods once again: -```bash +```bash tags=ipv4 $ kubectl apply -k ~/environment/eks-workshop/modules/networking/securitygroups-for-pods/sg namespace/catalog unchanged serviceaccount/catalog unchanged @@ -94,7 +94,7 @@ deployment "catalog" successfully rolled out This time the catalog Pod will start and the rollout will succeed. You can check the logs to confirm it's connecting to the RDS database: -```bash +```bash tags=ipv4 $ kubectl -n catalog logs deployment/catalog | grep Connect 2022/12/20 20:52:10 Connecting to catalog_user:xxxxxxxxxx@tcp(eks-workshop-catalog.cjkatqd1cnrz.us-west-2.rds.amazonaws.com:3306)/catalog?timeout=5s 2022/12/20 20:52:10 Connected diff --git a/website/docs/networking/security-groups-for-pods/index.md b/website/docs/networking/security-groups-for-pods/index.md index e6d06b48a..d596c915e 100644 --- a/website/docs/networking/security-groups-for-pods/index.md +++ b/website/docs/networking/security-groups-for-pods/index.md @@ -8,7 +8,7 @@ sidebar_custom_props: {"module": true} :::tip Before you start Prepare your environment for this section: -```bash timeout=900 wait=30 +```bash tags=ipv4 timeout=900 wait=30 $ prepare-environment networking/securitygroups-for-pods ``` diff --git a/website/docs/networking/security-groups-for-pods/inspecting-pod.md b/website/docs/networking/security-groups-for-pods/inspecting-pod.md index ca267a4fd..ff213ad06 100644 --- a/website/docs/networking/security-groups-for-pods/inspecting-pod.md +++ b/website/docs/networking/security-groups-for-pods/inspecting-pod.md @@ -7,7 +7,7 @@ Now that the catalog Pod is running and successfully using our Amazon RDS databa The first thing we can do is check the annotations of the Pod: -```bash +```bash tags=ipv4 $ kubectl get pod -n catalog -l app.kubernetes.io/component=service -o yaml \ | yq '.items[0].metadata.annotations' kubernetes.io/psp: eks.privileged @@ -21,7 +21,7 @@ The `vpc.amazonaws.com/pod-eni` annotation shows metadata regarding things like The Kubernetes events will also show the VPC resource controller taking action in response to the configuration we added: -```bash +```bash tags=ipv4 $ kubectl get events -n catalog | grep SecurityGroupRequested 5m Normal SecurityGroupRequested pod/catalog-6ccc6b5575-w2fvm Pod will get the following Security Groups [sg-037ec36e968f1f5e7] ``` diff --git a/website/docs/networking/security-groups-for-pods/introduction.md b/website/docs/networking/security-groups-for-pods/introduction.md index 3acdc6b95..bae5932ba 100644 --- a/website/docs/networking/security-groups-for-pods/introduction.md +++ b/website/docs/networking/security-groups-for-pods/introduction.md @@ -7,7 +7,7 @@ The `catalog` component
of our architecture uses a MySQL database as its storage You can see this by running the following command: -```bash +```bash tags=ipv4 $ kubectl -n catalog get pod NAME READY STATUS RESTARTS AGE catalog-5d7fc9d8f-xm4hs 1/1 Running 0 14m @@ -16,7 +16,7 @@ catalog-mysql-0 1/1 Running 0 14m In the case above, the Pod `catalog-mysql-0` is a MySQL Pod. We can verify our `catalog` application is using this by inspecting its environment: -```bash +```bash tags=ipv4 $ kubectl -n catalog exec deployment/catalog -- env \ | grep DB_ENDPOINT DB_ENDPOINT=catalog-mysql:3306 diff --git a/website/docs/networking/security-groups-for-pods/using-rds.md b/website/docs/networking/security-groups-for-pods/using-rds.md index dfcdcb4cb..d96e2020a 100644 --- a/website/docs/networking/security-groups-for-pods/using-rds.md +++ b/website/docs/networking/security-groups-for-pods/using-rds.md @@ -5,7 +5,7 @@ sidebar_position: 20 An RDS database has been created in our account, let's retrieve its endpoint and password to be used later: -```bash +```bash tags=ipv4 $ export CATALOG_RDS_ENDPOINT_QUERY=$(aws rds describe-db-instances --db-instance-identifier $EKS_CLUSTER_NAME-catalog --query 'DBInstances[0].Endpoint') $ export CATALOG_RDS_ENDPOINT=$(echo $CATALOG_RDS_ENDPOINT_QUERY | jq -r '.Address+":"+(.Port|tostring)') $ echo $CATALOG_RDS_ENDPOINT @@ -15,7 +15,7 @@ $ export CATALOG_RDS_PASSWORD=$(aws ssm get-parameter --name $EKS_CLUSTER_NAME-c The first step in this process is to re-configure the catalog service to use an Amazon RDS database that has already been created. The application loads most of its configuration from a ConfigMap, let's take a look at it: -```bash +```bash tags=ipv4 $ kubectl -n catalog get -o yaml cm catalog apiVersion: v1 data: @@ -36,13 +36,13 @@ ConfigMap/catalog Let's apply this change to use the RDS database: -```bash +```bash tags=ipv4 $ kubectl apply -k ~/environment/eks-workshop/modules/networking/securitygroups-for-pods/rds ``` Check that the ConfigMap has been updated with the new values: -```bash +```bash tags=ipv4 $ kubectl get -n catalog cm catalog -o yaml apiVersion: v1 data: @@ -58,7 +58,7 @@ metadata: Now we need to recycle the catalog Pods to pick up our new ConfigMap contents: -```bash expectError=true +```bash tags=ipv4 expectError=true $ kubectl delete pod -n catalog -l app.kubernetes.io/component=service pod "catalog-788bb5d488-9p6cj" deleted $ kubectl rollout status -n catalog deployment/catalog --timeout 30s @@ -68,7 +68,7 @@ error: timed out waiting for the condition We got an error, it looks like our catalog Pods failed to restart in time. What's gone wrong? Let's check the Pod logs to see what happened: -```bash +```bash tags=ipv4 $ kubectl -n catalog logs deployment/catalog 2022/12/19 17:43:05 Error: Failed to prep migration dial tcp 10.42.11.72:3306: i/o timeout 2022/12/19 17:43:05 Error: Failed to run migration dial tcp 10.42.11.72:3306: i/o timeout @@ -77,7 +77,7 @@ $ kubectl -n catalog logs deployment/catalog Our Pod is unable to connect to the RDS database. We can check the EC2 Security Group that's been applied to the RDS database like so: -```bash +```bash tags=ipv4 $ aws ec2 describe-security-groups \ --filters Name=vpc-id,Values=$VPC_ID Name=tag:Name,Values=$EKS_CLUSTER_NAME-catalog-rds | jq '.'
{ diff --git a/website/docs/networking/vpc-lattice/lattice-ab-testing.md b/website/docs/networking/vpc-lattice/lattice-ab-testing.md index 71ec0e5c7..cdb208aec 100644 --- a/website/docs/networking/vpc-lattice/lattice-ab-testing.md +++ b/website/docs/networking/vpc-lattice/lattice-ab-testing.md @@ -1,6 +1,6 @@ --- title: "Traffic Management" -sidebar_position: 20 +sidebar_position: 30 --- In this section we will show how to use Amazon VPC Lattice for advanced traffic management with weighted routing for blue/green and canary-style deployments. @@ -20,66 +20,32 @@ NAME READY STATUS RESTARTS AGE checkout-854cd7cd66-s2blp 1/1 Running 0 26s ``` -# Set up Lattice Service Network - -The following YAML will create a Kubernetes gateway resource which is associated with a VPC Lattice **Service Network**. +Now let's demonstrate how weighted routing works by creating `HTTPRoute` resources. First we'll create a `TargetGroupPolicy` resource so that Lattice knows how to health check our checkout component: ```file -manifests/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml -``` - -Apply it with the following command: - -```bash -$ cat ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml \ - | envsubst | kubectl apply -f - +manifests/modules/networking/vpc-lattice/routes/target-group-policy.yaml ``` -Verify that `eks-workshop` gateway is created: +Apply this resource: ```bash -$ kubectl get gateway -n checkout -NAME CLASS ADDRESS PROGRAMMED AGE -eks-workshop amazon-vpc-lattice True 29s +$ kubectl apply -f ~/environment/eks-workshop/modules/networking/vpc-lattice/routes/target-group-policy.yaml ``` -Once the gateway is created, find the VPC Lattice service network. Wait until the status is `Reconciled` (this could take about five minutes). - -```bash -$ kubectl describe gateway ${EKS_CLUSTER_NAME} -n checkout -apiVersion: gateway.networking.k8s.io/v1beta1 -kind: Gateway -status: - conditions: - message: 'aws-gateway-arn: arn:aws:vpc-lattice:us-west-2:1234567890:servicenetwork/sn-03015ffef38fdc005' - reason: Programmed - status: "True" - -$ kubectl wait --for=condition=Programmed gateway/${EKS_CLUSTER_NAME} -n checkout -``` - - Now you can see the associated **Service Network** created in the VPC console under the Lattice resources in the [AWS console](https://console.aws.amazon.com/vpc/home#ServiceNetworks). -![Checkout Service Network](assets/servicenetwork.png) - -# Create Routes to targets -Let's demonstrate how weighted routing works by creating `HTTPRoutes`.
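For orientation before applying it, a weighted `HTTPRoute` of the shape described here looks roughly like the sketch below. The parent `Gateway` name, service names, and port are illustrative assumptions; the workshop's actual route is the `checkout-route.yaml` file referenced below.

```yaml
# Illustrative sketch only — the real route is checkout-route.yaml, referenced below
apiVersion: gateway.networking.k8s.io/v1beta1
kind: HTTPRoute
metadata:
  name: checkoutroute
  namespace: checkout
spec:
  parentRefs:
    - name: eks-workshop # the Gateway associated with the Lattice service network
  rules:
    - backendRefs:
        - name: checkout # current version receives 25% of traffic
          kind: Service
          port: 80
          weight: 25
        - name: checkout-v2 # assumed name for the new version; receives 75%
          kind: Service
          port: 80
          weight: 75
```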
+Next create the Kubernetes `HTTPRoute` that distributes 75% of traffic to `checkoutv2` and the remaining 25% to `checkout`: -```bash -$ kubectl patch svc checkout -n checkout --patch '{"spec": { "type": "ClusterIP", "ports": [ { "name": "http", "port": 80, "protocol": "TCP", "targetPort": 8080 } ] } }' +```file +manifests/modules/networking/vpc-lattice/routes/checkout-route.yaml ``` -Create the Kubernetes `HTTPRoute` route that distributes 75% traffic to `checkoutv2` and remaining 25% traffic to `checkout`: +Apply this resource: ```bash hook=route $ cat ~/environment/eks-workshop/modules/networking/vpc-lattice/routes/checkout-route.yaml \ | envsubst | kubectl apply -f - ``` -```file -manifests/modules/networking/vpc-lattice/routes/checkout-route.yaml -``` - -This step may take 2-3 minutes, run the following command to wait for it to completed: +Creation of the associated resources may take 2-3 minutes; run the following command to wait for it to complete: ```bash wait=10 $ kubectl wait --for=jsonpath='{.status.parents[-1:].conditions[-1:].reason}'=ResolvedRefs httproute/checkoutroute -n checkout @@ -126,12 +92,22 @@ $ export CHECKOUT_ROUTE_DNS="http://$(kubectl get httproute checkoutroute -n che $ POD_NAME=$(kubectl -n ui get pods -o jsonpath='{.items[0].metadata.name}') $ kubectl exec $POD_NAME -n ui -- curl -s $CHECKOUT_ROUTE_DNS/health {"status":"ok","info":{},"error":{},"details":{}} ``` Now we have to point the UI service to the VPC Lattice service endpoint by patching the `ConfigMap` for the UI component: +```kustomization +modules/networking/vpc-lattice/ui/configmap.yaml +ConfigMap/ui +``` + +Make this configuration change: + ```bash -$ kubectl patch configmap/ui -n ui --type merge -p '{"data":{"ENDPOINTS_CHECKOUT": "'${CHECKOUT_ROUTE_DNS}'"}}' +$ kustomize build ~/environment/eks-workshop/modules/networking/vpc-lattice/ui/ \ + | envsubst | kubectl apply -f - ``` Let's ensure that the UI pods are restarted and then port-forward to the preview of your application with Cloud9. diff --git a/website/docs/networking/vpc-lattice/service-network.md b/website/docs/networking/vpc-lattice/service-network.md new file mode 100644 index 000000000..d6a44a28f --- /dev/null +++ b/website/docs/networking/vpc-lattice/service-network.md @@ -0,0 +1,59 @@ +--- +title: "Service network" +sidebar_position: 20 +--- + +The Gateway API controller creates a VPC Lattice service network if one doesn’t exist when you create a `Gateway` and associate a Kubernetes cluster VPC with a service network. A service network is a logical boundary that’s used to automatically implement service discovery and connectivity as well as apply access and observability policies to a collection of services. It offers inter-application connectivity over HTTP, HTTPS, and gRPC protocols within a VPC. As of today, the controller supports HTTP and HTTPS. + +Before creating a `Gateway`, we need to formalize the types of load balancing implementations that are available via the Kubernetes resource model with a [GatewayClass](https://gateway-api.sigs.k8s.io/concepts/api-overview/#gatewayclass).
The controller that listens to the Gateway API relies on an associated `GatewayClass` resource that the user can reference from their `Gateway`: + +```file +manifests/modules/networking/vpc-lattice/controller/gatewayclass.yaml +``` + +Let's create the `GatewayClass`: + +```bash +$ kubectl apply -f ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/gatewayclass.yaml +``` + +The following YAML will create a Kubernetes `Gateway` resource which is associated with a VPC Lattice **Service Network**. + +```file +manifests/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml +``` + +Apply it with the following command: + +```bash +$ cat ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml \ + | envsubst | kubectl apply -f - +``` + +Verify that the `eks-workshop` gateway is created: + +```bash +$ kubectl get gateway -n checkout +NAME CLASS ADDRESS PROGRAMMED AGE +eks-workshop amazon-vpc-lattice True 29s +``` + +Once the gateway is created, find the VPC Lattice service network. Wait until the status is `Reconciled` (this could take about five minutes). + +```bash +$ kubectl describe gateway ${EKS_CLUSTER_NAME} -n checkout +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: Gateway +status: + conditions: + message: 'aws-gateway-arn: arn:aws:vpc-lattice:us-west-2:1234567890:servicenetwork/sn-03015ffef38fdc005' + reason: Programmed + status: "True" + +$ kubectl wait --for=condition=Programmed gateway/${EKS_CLUSTER_NAME} -n checkout +``` + +Now you can see the associated **Service Network** created in the VPC console under the Lattice resources in the [AWS console](https://console.aws.amazon.com/vpc/home#ServiceNetworks). + +![Checkout Service Network](assets/servicenetwork.png) + diff --git a/website/docs/networking/vpc-lattice/setup.md b/website/docs/networking/vpc-lattice/setup.md index f3cd10386..6028a9139 100644 --- a/website/docs/networking/vpc-lattice/setup.md +++ b/website/docs/networking/vpc-lattice/setup.md @@ -7,14 +7,13 @@ sidebar_position: 10 Follow these instructions to create a cluster and deploy the AWS Gateway API Controller. -First, configure security group to receive traffic from the VPC Lattice fleet. You must set up security groups so that they allow all Pods communicating with VPC Lattice to allow traffic on all ports from the `169.254.171.0/24` address range. See [Control traffic to resources using security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) for details. You can use the following managed prefix to provide the values: +First, configure the cluster security group to receive traffic from the VPC Lattice fleet. You must set up security groups so that all Pods communicating with VPC Lattice allow traffic on all ports from the `169.254.171.0/24` address range for IPv4 and the `fd00:ec2:80::/64` address range for IPv6. See [Control traffic to resources using security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) for details.
You can use the following managed prefix lists to provide the values: ```bash -$ PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) -$ MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') $ CLUSTER_SG=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --output json| jq -r '.cluster.resourcesVpcConfig.clusterSecurityGroupId') -$ aws ec2 authorize-security-group-ingress --group-id $CLUSTER_SG --cidr $MANAGED_PREFIX --protocol -1 - +$ IPV4_PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) +$ IPV4_MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $IPV4_PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') +$ aws ec2 authorize-security-group-ingress --group-id $CLUSTER_SG --cidr $IPV4_MANAGED_PREFIX --protocol -1 { "Return": true, "SecurityGroupRules": [ { @@ -30,6 +29,25 @@ $ aws ec2 authorize-security-group-ingress --group-id $CLUSTER_SG --cidr $MANAGE } ] } +$ IPV6_PREFIX_LIST_ID=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.ipv6.vpc-lattice\'"].PrefixListId" | jq --raw-output .[]) +$ IPV6_MANAGED_PREFIX=$(aws ec2 get-managed-prefix-list-entries --prefix-list-id $IPV6_PREFIX_LIST_ID --output json | jq -r '.Entries[0].Cidr') +$ aws ec2 authorize-security-group-ingress --group-id $CLUSTER_SG \ + --ip-permissions IpProtocol=-1,FromPort=-1,ToPort=-1,Ipv6Ranges="[{CidrIpv6=$IPV6_MANAGED_PREFIX}]" +{ + "Return": true, + "SecurityGroupRules": [ + { + "SecurityGroupRuleId": "sgr-0eeda91601cbafbfa", + "GroupId": "sg-047f384df6b944788", + "GroupOwnerId": "364959265732", + "IsEgress": false, + "IpProtocol": "-1", + "FromPort": -1, + "ToPort": -1, + "CidrIpv6": "fd00:ec2:80::/64" + } + ] +} ``` This step will install the controller and the CRDs (Custom Resource Definitions) required to interact with the Kubernetes Gateway API. @@ -38,7 +56,7 @@ This step will install the controller and the CRDs (Custom Resource Definitions) $ aws ecr-public get-login-password --region us-east-1 | helm registry login --username AWS --password-stdin public.ecr.aws $ helm install gateway-api-controller \ oci://public.ecr.aws/aws-application-networking-k8s/aws-gateway-controller-chart \ - --version=v0.0.15 \ + --version=v0.0.16 \ --create-namespace \ --set=aws.region=${AWS_REGION} \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="$LATTICE_IAM_ROLE" \ @@ -46,14 +64,10 @@ $ helm install gateway-api-controller \ --wait ``` -Similar to `IngressClass` for `Ingress` and `StorageClass` for `PersistentVolumes`, before creating a `Gateway`, we need to formalize the types of load balancing implementations that are available via the Kubernetes resource model with a [GatewayClass](https://gateway-api.sigs.k8s.io/concepts/api-overview/#gatewayclass). The controller that listens to the Gateway API relies on an associated `GatewayClass` resource that the user can reference from their `Gateway`.
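For context, the `GatewayClass` discussed above is a small resource along these lines. The `controllerName` shown is the value the AWS Gateway API Controller is commonly registered under — treat it as an assumption and refer to the `gatewayclass.yaml` manifest for the authoritative definition.

```yaml
# Illustrative sketch only — see gatewayclass.yaml for the authoritative resource
apiVersion: gateway.networking.k8s.io/v1beta1
kind: GatewayClass
metadata:
  name: amazon-vpc-lattice
spec:
  # Assumed controller name for the AWS Gateway API Controller
  controllerName: application-networking.k8s.aws/gateway-api-controller
```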
+The controller will now be running as a deployment: ```bash -$ kubectl apply -f ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/gatewayclass.yaml -``` - -The command above will create the following resource: - -```file -manifests/modules/networking/vpc-lattice/controller/gatewayclass.yaml -``` +$ kubectl get deployment -n gateway-api-controller +NAME READY UP-TO-DATE AVAILABLE AGE +gateway-api-controller-aws-gateway-controller-chart 2/2 2 2 24s +``` \ No newline at end of file diff --git a/website/docs/observability/open-source-metrics/accessing-grafana.md b/website/docs/observability/open-source-metrics/accessing-grafana.md index 3a400a633..02224a39a 100644 --- a/website/docs/observability/open-source-metrics/accessing-grafana.md +++ b/website/docs/observability/open-source-metrics/accessing-grafana.md @@ -2,7 +2,6 @@ title: "Accessing Grafana" sidebar_position: 30 --- - An instance of Grafana has been pre-installed in your EKS cluster. To access it you first need to retrieve the URL: ```bash hook=check-grafana diff --git a/website/src/theme/MDXComponents.js b/website/src/theme/MDXComponents.js index 42296a712..1986dec51 100644 --- a/website/src/theme/MDXComponents.js +++ b/website/src/theme/MDXComponents.js @@ -15,4 +15,6 @@ export default { terminal: Terminal, browser: BrowserWindow, kustomization: Kustomization, + tabs: Tabs, + tabItem: TabItem, };