From 59f37157a78977bc25c4c36eedbd361ca7efc02e Mon Sep 17 00:00:00 2001 From: Matt Calvert Date: Mon, 26 Oct 2020 07:10:53 +0000 Subject: [PATCH 1/6] Changes to support Dual Stack networking --- README.md | 1 + docs/calico.md | 7 +-- docs/vars.md | 8 ++++ .../group_vars/k8s-cluster/k8s-cluster.yml | 19 ++++++++ .../group_vars/k8s-cluster/k8s-net-calico.yml | 3 ++ .../templates/kubeadm-config.v1beta2.yaml.j2 | 32 +++++++++++-- .../tasks/0080-system-configurations.yml | 9 ++++ roles/kubespray-defaults/defaults/main.yaml | 19 ++++++++ roles/network_plugin/calico/defaults/main.yml | 4 ++ roles/network_plugin/calico/tasks/install.yml | 46 +++++++++++++++++++ .../calico/templates/calico-node.yml.j2 | 7 ++- .../calico/templates/cni-calico.conflist.j2 | 4 ++ 12 files changed, 151 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 0c3c7b951c6..0d8a9b64e5b 100644 --- a/README.md +++ b/README.md @@ -160,6 +160,7 @@ Note: The list of available docker version is 18.09, 19.03 and 20.10. The recomm - **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is not supported for now** - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md)) - The target servers are configured to allow **IPv4 forwarding**. +- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**. - The **firewalls are not managed**, you'll need to implement your own rules the way you used to. in order to avoid any issue during deployment you should disable your firewall. - If kubespray is ran from non-root user account, correct privilege escalation method diff --git a/docs/calico.md b/docs/calico.md index f722a09328e..7e5f865681a 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -58,13 +58,14 @@ To re-define you need to edit the inventory and add a group variable `calico_net calico_network_backend: none ``` -### Optional : Define the default pool CIDR +### Optional : Define the default pool CIDRs -By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool. -In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet`), it starts with the default IP Pool of which IP range CIDR can by defined in group_vars (k8s-cluster/k8s-net-calico.yml): +By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6. 
+In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6`). It starts with the default IP Pools, whose IP range CIDRs can be defined in group_vars (k8s-cluster/k8s-net-calico.yml):
 
 ```ShellSession
 calico_pool_cidr: 10.233.64.0/20
+calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
 ```
 
 ### Optional : BGP Peering with border routers
diff --git a/docs/vars.md b/docs/vars.md
index d6c4d45dbfd..a97eee03513 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -62,6 +62,10 @@ following default cluster parameters:
   raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
   (assertion not applicable to calico which doesn't use this as a hard limit, see
   [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
+* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
+* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
+* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
+* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. The remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube-nodes can be in the cluster.
 * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
 * *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
 * *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
@@ -87,6 +91,10 @@ Note, if cloud providers have any use of the ``10.233.0.0/16``, like
 instances' private addresses, make sure to pick another values for
 ``kube_service_addresses`` and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``.
 
+## Enabling Dual Stack (IPv4 + IPv6) networking
+
+If *enable_dual_stack_networks* is set to ``true``, Dual Stack networking will be enabled in the cluster. This will use the default IPv4 and IPv6 subnets specified in the defaults file in the ``kubespray-defaults`` role, unless overridden. The default config will give you room for up to 256 nodes with 126 pods per node, and up to 4096 services.
+
 ## DNS variables
 
 By default, hosts are set up with 8.8.8.8 as an upstream DNS server and all
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
index ac60e51efec..c0b1186493c 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
@@ -94,6 +94,25 @@ kube_pods_subnet: 10.233.64.0/18
 # - kubelet_max_pods: 110
 kube_network_node_prefix: 24
 
+# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
+enable_dual_stack_networks: false
+
+# Kubernetes internal network for IPv6 services, unused block of space.
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides 4096 IPv6 IPs.
+kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
+
+# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
+# This network must not already be in your network infrastructure!
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 256 nodes with 254 pods per node.
+kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# IPv6 subnet size allocated to each node for pods.
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 254 pods per node.
+kube_network_node_prefix_ipv6: 120
+
 # The port the API Server will be listening on.
 kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
 kube_apiserver_port: 6443  # (https)
diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
index a6e925b9b34..d48821cce90 100644
--- a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
+++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml
@@ -20,6 +20,9 @@
 # add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
 # calico_pool_cidr: 1.2.3.4/5
 
+# Add default IPv6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
+# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
 # Global as_num (/calico/bgp/v1/global/as_num)
 # global_as_num: "64512"
 
diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
index 47570710c5c..325e13345bd 100644
--- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2
@@ -88,8 +88,14 @@ dns:
   imageTag: {{ coredns_image_tag }}
 networking:
   dnsDomain: {{ dns_domain }}
-  serviceSubnet: {{ kube_service_addresses }}
-  podSubnet: {{ kube_pods_subnet }}
+  serviceSubnet: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
+  podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
+{% if kube_feature_gates %}
+featureGates:
+{% for kube_feature_gate in kube_feature_gates %}
+  {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
+{% endfor %}
+{% endif %}
 kubernetesVersion: {{ kube_version }}
 {% if kubeadm_config_api_fqdn is defined %}
 controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
@@ -127,6 +133,7 @@ apiServer:
     etcd-servers-overrides: "/events#{{ etcd_events_access_addresses_semicolon }}"
 {% endif %}
     service-node-port-range: {{ kube_apiserver_node_port_range }}
+    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
     kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
     profiling: "{{ kube_profiling }}"
     request-timeout: "{{ kube_apiserver_request_timeout }}"
@@ -262,7 +269,14 @@ controllerManager:
   extraArgs:
     node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
     node-monitor-period: {{ kube_controller_node_monitor_period }}
+    cluster-cidr: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
+    service-cluster-ip-range: "{{ kube_service_addresses }}{{ ',' + kube_service_addresses_ipv6 if enable_dual_stack_networks }}"
+{% if enable_dual_stack_networks %}
+    node-cidr-mask-size-ipv4: "{{ kube_network_node_prefix }}"
+    node-cidr-mask-size-ipv6: "{{ kube_network_node_prefix_ipv6 }}"
+{% else %}
     node-cidr-mask-size: "{{ kube_network_node_prefix }}"
+{% endif %}
     profiling: "{{ kube_profiling }}"
     terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
     bind-address: {{ kube_controller_manager_bind_address }}
@@ -349,7 +363,7 @@ clientConnection:
   contentType: {{ kube_proxy_client_content_type }}
   kubeconfig: {{ kube_proxy_client_kubeconfig }}
   qps: {{ kube_proxy_client_qps }}
-clusterCIDR: {{ kube_pods_subnet }}
+clusterCIDR: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}"
 configSyncPeriod: {{ kube_proxy_config_sync_period }}
 conntrack:
   maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
@@ -357,6 +371,12 @@ conntrack:
   tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
   tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
 enableProfiling: {{ kube_proxy_enable_profiling }}
+{% if kube_feature_gates %}
+featureGates:
+{% for kube_feature_gate in kube_feature_gates %}
+  {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
+{% endfor %}
+{% endif %}
 healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
 hostnameOverride: {{ kube_override_hostname }}
 iptables:
@@ -404,3 +424,9 @@ clusterDNS:
 {% for dns_address in kubelet_cluster_dns %}
   - {{ dns_address }}
 {% endfor %}
+{% if kube_feature_gates %}
+featureGates:
+{% for kube_feature_gate in kube_feature_gates %}
+  {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }}
+{% endfor %}
+{% endif %}
diff --git a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
index be0200921d6..396ef3fa7be 100644
--- a/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
+++ b/roles/kubernetes/preinstall/tasks/0080-system-configurations.yml
@@ -62,6 +62,15 @@
     state: present
     reload: yes
 
+- name: Enable ipv6 forwarding
+  sysctl:
+    sysctl_file: "{{ sysctl_file_path }}"
+    name: net.ipv6.conf.all.forwarding
+    value: 1
+    state: present
+    reload: yes
+  when: enable_dual_stack_networks | bool
+
 - name: Ensure kube-bench parameters are set
   sysctl:
     sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 8b78fcfc85b..0da603073e4 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -181,6 +181,25 @@ kube_pods_subnet: 10.233.64.0/18
 # - kubelet_max_pods: 110
 kube_network_node_prefix: 24
 
+# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
+enable_dual_stack_networks: false
+
+# Kubernetes internal network for IPv6 services, unused block of space.
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides 4096 IPv6 IPs.
+kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
+
+# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
+# This network must not already be in your network infrastructure!
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 256 nodes with 254 pods per node.
+kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
+
+# IPv6 subnet size allocated to each node for pods.
+# This is only used if enable_dual_stack_networks is set to true.
+# This provides room for 254 pods per node.
+kube_network_node_prefix_ipv6: 120
+
 # The virtual cluster IP, real host IPs and ports the API Server will be
 # listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API enpdoint diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index b43f45c9d1b..f036ed210b0 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -12,6 +12,10 @@ ipip_mode: "{{ 'Always' if ipip else 'Never' }}" # change to "CrossSubnet" if y calico_ipip_mode: "{{ ipip_mode }}" calico_vxlan_mode: 'Never' +calico_ipip_mode_ipv6: Never +calico_vxlan_mode_ipv6: Never +calico_pool_blocksize_ipv6: 116 + calico_cert_dir: /etc/calico/certs # Global as_num (/calico/bgp/v1/global/as_num) diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 561bcd87603..cec3e6700db 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -108,6 +108,31 @@ - 'calico_conf.stdout == "0"' - calico_pool_cidr is defined +- name: Calico | Check if calico IPv6 network pool has already been configured + # noqa 306 - grep will exit 1 if no match found + shell: > + {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}" | wc -l + args: + executable: /bin/bash + register: calico_conf_ipv6 + retries: 4 + until: calico_conf_ipv6.rc == 0 + delay: "{{ retry_stagger | random + 3 }}" + changed_when: false + when: + - inventory_hostname == groups['kube-master'][0] + - enable_dual_stack_networks + +- name: Calico | Ensure that calico_pool_cidr_ipv6 is within kube_pods_subnet_ipv6 when defined + assert: + that: "[calico_pool_cidr_ipv6] | ipaddr(kube_pods_subnet_ipv6) | length == 1" + msg: "{{ calico_pool_cidr_ipv6 }} is not within or equal to {{ kube_pods_subnet_ipv6 }}" + when: + - inventory_hostname == groups['kube-master'][0] + - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" + - calico_pool_cidr_ipv6 is defined + - enable_dual_stack_networks + - name: Calico | Create calico manifests for kdd template: src: "{{ item.file }}.j2" @@ -156,6 +181,27 @@ - inventory_hostname == groups['kube-master'][0] - 'calico_conf.stdout == "0"' +- name: Calico | Configure calico ipv6 network pool (version >= v3.3.0) + command: + cmd: "{{ bin_dir }}/calicoctl.sh apply -f -" + stdin: > + { "kind": "IPPool", + "apiVersion": "projectcalico.org/v3", + "metadata": { + "name": "{{ calico_pool_name }}-ipv6", + }, + "spec": { + "blockSize": {{ calico_pool_blocksize_ipv6 | default(kube_network_node_prefix_ipv6) }}, + "cidr": "{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}", + "ipipMode": "{{ calico_ipip_mode_ipv6 }}", + "vxlanMode": "{{ calico_vxlan_mode_ipv6 }}", + "natOutgoing": {{ nat_outgoing_ipv6|default(false) and not peer_with_router_ipv6|default(false) }} }} + when: + - inventory_hostname == groups['kube-master'][0] + - calico_conf_ipv6.stdout is defined and calico_conf_ipv6.stdout == "0" + - calico_version is version("v3.3.0", ">=") + - enable_dual_stack_networks | bool + - name: Populate Service External IPs set_fact: _service_external_ips: "{{ _service_external_ips|default([]) + [ {'cidr': item} ] }}" diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 1bbd800f65c..7c6c7900c37 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -200,9 +200,8 @@ spec: {% endif %} - name: CALICO_IPV4POOL_IPIP value: "{{ 
calico_ipv4pool_ipip }}" - # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT - value: "false" + value: "{{ enable_dual_stack_networks | default(false) }}" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN value: "{{ calico_loglevel }}" @@ -239,6 +238,10 @@ spec: - name: IP value: "autodetect" {% endif %} +{% if enable_dual_stack_networks %} + - name: IP6 + value: autodetect +{% endif %} {% if calico_use_default_route_src_ipaddr|default(false) %} - name: FELIX_DEVICEROUTESOURCEADDRESS valueFrom: diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2 index 35b66488da1..d012d21f073 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 +++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2 @@ -30,6 +30,10 @@ {% else %} "ipam": { "type": "calico-ipam", +{% if enable_dual_stack_networks %} + "assign_ipv6": "true", + "ipv6_pools": ["{{ calico_pool_cidr_ipv6 | default(kube_pods_subnet_ipv6) }}"], +{% endif %} "assign_ipv4": "true", "ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"] }, From 72db549ec33b4a329ccee2a631f48a4d58196677 Mon Sep 17 00:00:00 2001 From: Matt Calvert Date: Mon, 2 Nov 2020 17:55:38 +0000 Subject: [PATCH 2/6] Switch to use upstream kube_feature_gates logic --- .../templates/kubeadm-config.v1beta2.yaml.j2 | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 index 325e13345bd..37f2debbdcf 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 @@ -92,8 +92,8 @@ networking: podSubnet: "{{ kube_pods_subnet }}{{ ',' + kube_pods_subnet_ipv6 if enable_dual_stack_networks }}" {% if kube_feature_gates %} featureGates: -{% for kube_feature_gate in kube_feature_gates %} - {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }} +{% for feature in kube_feature_gates %} + {{ feature|replace("=", ": ") }} {% endfor %} {% endif %} kubernetesVersion: {{ kube_version }} @@ -371,12 +371,6 @@ conntrack: tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }} tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }} enableProfiling: {{ kube_proxy_enable_profiling }} -{% if kube_feature_gates %} -featureGates: -{% for kube_feature_gate in kube_feature_gates %} - {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }} -{% endfor %} -{% endif %} healthzBindAddress: {{ kube_proxy_healthz_bind_address }} hostnameOverride: {{ kube_override_hostname }} iptables: @@ -401,9 +395,9 @@ portRange: {{ kube_proxy_port_range }} udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }} {% if kube_feature_gates %} featureGates: -{% for feature in kube_feature_gates %} +{% for feature in kube_feature_gates %} {{ feature|replace("=", ": ") }} -{% endfor %} +{% endfor %} {% endif %} {# DNS settings for kubelet #} {% if enable_nodelocaldns %} @@ -426,7 +420,7 @@ clusterDNS: {% endfor %} {% if kube_feature_gates %} featureGates: -{% for kube_feature_gate in kube_feature_gates %} - {{ kube_feature_gate.split("=")[0] }}: {{ kube_feature_gate.split("=")[1] }} +{% for feature in kube_feature_gates %} + {{ feature|replace("=", ": ") }} {% endfor %} {% endif %} From 5f5f92e5c168028802cc88d7551c18a45ecbf2ba Mon Sep 17 00:00:00 2001 
From: Matt Calvert Date: Mon, 2 Nov 2020 17:58:06 +0000 Subject: [PATCH 3/6] Auto-add IPv6DualStack featureGate When enable_dual_stack_networks is set, we need to make sure IPv6DualStack=true is set too, otherwise we end up with a broken cluster. --- roles/kubernetes/preinstall/tasks/0040-set_facts.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index 1bb9e286bed..fdf1f921059 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -176,3 +176,10 @@ set_fact: kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins when: not usr.stat.writeable + +- name: Ensure IPv6DualStack featureGate is set when enable_dual_stack_networks is true + set_fact: + kube_feature_gates: "{{ kube_feature_gates + [ 'IPv6DualStack=true' ] }}" + when: + - enable_dual_stack_networks + - not 'IPv6DualStack=true' in kube_feature_gates From 3bfc5d5094c4e4e86053cbf8fb0fa149a11ad97a Mon Sep 17 00:00:00 2001 From: Matt Calvert Date: Tue, 3 Nov 2020 07:12:36 +0000 Subject: [PATCH 4/6] Ensure we gather IPv6 facts --- facts.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/facts.yml b/facts.yml index 9296da46cfe..a599f137175 100644 --- a/facts.yml +++ b/facts.yml @@ -14,6 +14,8 @@ loop: - ansible_distribution_major_version - ansible_default_ipv4 + - ansible_default_ipv6 - ansible_all_ipv4_addresses + - ansible_all_ipv6_addresses - ansible_memtotal_mb - ansible_swaptotal_mb From da73632246d895508bdf5ade6d68dfd76eff70bb Mon Sep 17 00:00:00 2001 From: Matt Calvert Date: Fri, 11 Dec 2020 20:19:34 +0000 Subject: [PATCH 5/6] Add IPv6 libvirt details to the Vagrantfile --- Vagrantfile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 805044ad44e..066e8920d11 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -49,6 +49,7 @@ $vm_cpus ||= 2 $shared_folders ||= {} $forwarded_ports ||= {} $subnet ||= "172.18.8" +$subnet_ipv6 ||= "fd3c:b398:0698:0756" $os ||= "ubuntu1804" $network_plugin ||= "flannel" # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni @@ -194,7 +195,12 @@ Vagrant.configure("2") do |config| end ip = "#{$subnet}.#{i+100}" - node.vm.network :private_network, ip: ip + node.vm.network :private_network, ip: ip, + :libvirt__guest_ipv6 => 'yes', + :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}", + :libvirt__ipv6_prefix => "64", + :libvirt__forward_mode => "none", + :libvirt__dhcp_enabled => false # Disable swap for each vm node.vm.provision "shell", inline: "swapoff -a" From 125aba37f96d216eeba4f5315cef39d02c30048a Mon Sep 17 00:00:00 2001 From: Matt Calvert Date: Fri, 11 Dec 2020 20:52:51 +0000 Subject: [PATCH 6/6] Add in tests for Calico with dual-stack networking --- .gitlab-ci/vagrant.yml | 5 +++++ Vagrantfile | 6 ++++++ tests/files/vagrant_ubuntu18-calico-dual-stack.rb | 7 +++++++ tests/files/vagrant_ubuntu18-calico-dual-stack.yml | 8 ++++++++ 4 files changed, 26 insertions(+) create mode 100644 tests/files/vagrant_ubuntu18-calico-dual-stack.rb create mode 100644 tests/files/vagrant_ubuntu18-calico-dual-stack.yml diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml index 7861dbe3ccc..445393973fa 100644 --- a/.gitlab-ci/vagrant.yml +++ b/.gitlab-ci/vagrant.yml @@ -38,6 +38,11 @@ molecule_tests: after_script: - chronic ./tests/scripts/testcases_cleanup.sh +vagrant_ubuntu18-calico-dual-stack: + stage: deploy-part2 + 
extends: .vagrant + when: on_success + vagrant_ubuntu18-flannel: stage: deploy-part2 extends: .vagrant diff --git a/Vagrantfile b/Vagrantfile index 066e8920d11..d5736d913a6 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -205,6 +205,12 @@ Vagrant.configure("2") do |config| # Disable swap for each vm node.vm.provision "shell", inline: "swapoff -a" + # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that. + if ["ubuntu1804", "ubuntu2004"].include? $os + node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf" + node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf" + end + # Disable firewalld on oraclelinux/redhat vms if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld" diff --git a/tests/files/vagrant_ubuntu18-calico-dual-stack.rb b/tests/files/vagrant_ubuntu18-calico-dual-stack.rb new file mode 100644 index 00000000000..f7d7765ebea --- /dev/null +++ b/tests/files/vagrant_ubuntu18-calico-dual-stack.rb @@ -0,0 +1,7 @@ +# For CI we are not worried about data persistence across reboot +$libvirt_volume_cache = "unsafe" + +# Checking for box update can trigger API rate limiting +# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html +$box_check_update = false +$network_plugin = "calico" diff --git a/tests/files/vagrant_ubuntu18-calico-dual-stack.yml b/tests/files/vagrant_ubuntu18-calico-dual-stack.yml new file mode 100644 index 00000000000..533ebbbbabb --- /dev/null +++ b/tests/files/vagrant_ubuntu18-calico-dual-stack.yml @@ -0,0 +1,8 @@ +--- +# Kubespray settings + +kube_network_plugin: calico +enable_dual_stack_networks: true + +deploy_netchecker: true +dns_min_replicas: 1
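
For orientation, here is roughly what the kubeadm-config template in PATCH 1/6 renders to once PATCH 3/6 has auto-added the feature gate. This is an illustrative sketch only: the IPv6 values are the defaults introduced by this series, while `10.233.0.0/18` and `cluster.local` are assumed to be the stock kubespray values for `kube_service_addresses` and `dns_domain` (neither appears in these patches).

```yaml
# Illustrative rendered ClusterConfiguration fragment with
# enable_dual_stack_networks: true and otherwise default subnets.
networking:
  dnsDomain: cluster.local
  serviceSubnet: "10.233.0.0/18,fd85:ee78:d8a6:8607::1000/116"
  podSubnet: "10.233.64.0/18,fd85:ee78:d8a6:8607::1:0000/112"
featureGates:
  IPv6DualStack: true
```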
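
Once a cluster is deployed with these patches and `enable_dual_stack_networks: true`, a quick smoke test is to confirm that nodes, pods and Calico all picked up both address families. A minimal sketch follows; the `calicoctl.sh` path assumes kubespray's default `bin_dir` of `/usr/local/bin`:

```ShellSession
# Every node should report one IPv4 and one IPv6 podCIDR
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDRs}{"\n"}{end}'

# Pods should list both families in status.podIPs
kubectl get pods -A -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[*].ip}{"\n"}{end}'

# Both the default IPv4 pool and the new "<calico_pool_name>-ipv6" pool should exist
/usr/local/bin/calicoctl.sh get ippool -o wide
```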
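
To exercise the IPv6 service range end to end, a Service can request an IPv6 ClusterIP explicitly. A hedged example: the `ipFamilies`/`ipFamilyPolicy` fields are the Kubernetes v1.20+ dual-stack API (earlier alpha releases used the singular `spec.ipFamily` instead), and the name and selector below are placeholders, not anything defined by this series.

```ShellSession
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: echo-v6            # placeholder name
spec:
  ipFamilyPolicy: SingleStack
  ipFamilies:
    - IPv6                 # ClusterIP is allocated from kube_service_addresses_ipv6
  selector:
    app: echo              # placeholder selector
  ports:
    - port: 80
EOF
```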