diff --git a/contrib/dind/roles/dind-cluster/tasks/main.yaml b/contrib/dind/roles/dind-cluster/tasks/main.yaml index 247a0a8e98e..4f96388c041 100644 --- a/contrib/dind/roles/dind-cluster/tasks/main.yaml +++ b/contrib/dind/roles/dind-cluster/tasks/main.yaml @@ -1,9 +1,9 @@ --- -- name: set_fact distro_setup +- name: Set_fact distro_setup set_fact: distro_setup: "{{ distro_settings[node_distro] }}" -- name: set_fact other distro settings +- name: Set_fact other distro settings set_fact: distro_user: "{{ distro_setup['user'] }}" distro_ssh_service: "{{ distro_setup['ssh_service'] }}" diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 5b63a6b37d0..7d60aa55251 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -1,9 +1,9 @@ --- -- name: set_fact distro_setup +- name: Set_fact distro_setup set_fact: distro_setup: "{{ distro_settings[node_distro] }}" -- name: set_fact other distro settings +- name: Set_fact other distro settings set_fact: distro_image: "{{ distro_setup['image'] }}" distro_init: "{{ distro_setup['init'] }}" diff --git a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml index 0a58598505d..5e187c39671 100644 --- a/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml +++ b/contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/main.yml @@ -4,11 +4,11 @@ include_vars: "{{ ansible_os_family }}.yml" # Install xfs package -- name: install xfs Debian +- name: Install xfs Debian apt: name=xfsprogs state=present when: ansible_os_family == "Debian" -- name: install xfs RedHat +- name: Install xfs RedHat package: name=xfsprogs state=present when: ansible_os_family == "RedHat" @@ -17,7 +17,7 @@ filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}" # Mount external volumes -- name: mounting new xfs filesystem +- name: Mounting 
new xfs filesystem mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted" # Setup/install tasks. diff --git a/contrib/network-storage/heketi/roles/provision/handlers/main.yml b/contrib/network-storage/heketi/roles/provision/handlers/main.yml index 9e876de177b..4e768addaf2 100644 --- a/contrib/network-storage/heketi/roles/provision/handlers/main.yml +++ b/contrib/network-storage/heketi/roles/provision/handlers/main.yml @@ -1,3 +1,3 @@ --- -- name: "stop port forwarding" +- name: "Stop port forwarding" command: "killall " diff --git a/playbooks/legacy_groups.yml b/playbooks/legacy_groups.yml index 0d017106f74..643032ff098 100644 --- a/playbooks/legacy_groups.yml +++ b/playbooks/legacy_groups.yml @@ -6,7 +6,7 @@ gather_facts: false tags: always tasks: - - name: add nodes to kube_control_plane group + - name: Add nodes to kube_control_plane group group_by: key: 'kube_control_plane' @@ -15,7 +15,7 @@ gather_facts: false tags: always tasks: - - name: add nodes to kube_node group + - name: Add nodes to kube_node group group_by: key: 'kube_node' @@ -24,7 +24,7 @@ gather_facts: false tags: always tasks: - - name: add nodes to k8s_cluster group + - name: Add nodes to k8s_cluster group group_by: key: 'k8s_cluster' @@ -33,7 +33,7 @@ gather_facts: false tags: always tasks: - - name: add nodes to calico_rr group + - name: Add nodes to calico_rr group group_by: key: 'calico_rr' @@ -42,6 +42,6 @@ gather_facts: false tags: always tasks: - - name: add nodes to no-floating group + - name: Add nodes to no-floating group group_by: key: 'no_floating' diff --git a/playbooks/reset.yml b/playbooks/reset.yml index 6fa9fa3accb..768b11fd454 100644 --- a/playbooks/reset.yml +++ b/playbooks/reset.yml @@ -24,7 +24,7 @@ private: no pre_tasks: - - name: check confirmation + - name: Check confirmation fail: msg: "Reset confirmation failed" when: reset_confirmation != "yes" diff --git a/playbooks/scale.yml b/playbooks/scale.yml index 
8e79bfa038b..08e0f35bc67 100644 --- a/playbooks/scale.yml +++ b/playbooks/scale.yml @@ -98,7 +98,7 @@ environment: "{{ proxy_disable_env }}" register: kubeadm_upload_cert changed_when: false - - name: set fact 'kubeadm_certificate_key' for later use + - name: Set fact 'kubeadm_certificate_key' for later use set_fact: kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}" when: kubeadm_certificate_key is not defined diff --git a/roles/bastion-ssh-config/tasks/main.yml b/roles/bastion-ssh-config/tasks/main.yml index a18291b3bf1..920763eb583 100644 --- a/roles/bastion-ssh-config/tasks/main.yml +++ b/roles/bastion-ssh-config/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: set bastion host IP and port +- name: Set bastion host IP and port set_fact: bastion_ip: "{{ hostvars[groups['bastion'][0]]['ansible_host'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_host']) }}" bastion_port: "{{ hostvars[groups['bastion'][0]]['ansible_port'] | d(hostvars[groups['bastion'][0]]['ansible_ssh_port']) | d(22) }}" @@ -12,7 +12,7 @@ set_fact: real_user: "{{ ansible_user }}" -- name: create ssh bastion conf +- name: Create ssh bastion conf become: false delegate_to: localhost connection: local diff --git a/roles/container-engine/containerd-common/tasks/main.yml b/roles/container-engine/containerd-common/tasks/main.yml index cfd78f3a379..2df25fcf423 100644 --- a/roles/container-engine/containerd-common/tasks/main.yml +++ b/roles/container-engine/containerd-common/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: containerd-common | check if fedora coreos +- name: Containerd-common | check if fedora coreos stat: path: /run/ostree-booted get_attributes: no @@ -7,11 +7,11 @@ get_mime: no register: ostree -- name: containerd-common | set is_ostree +- name: Containerd-common | set is_ostree set_fact: is_ostree: "{{ ostree.stat.exists }}" -- name: containerd-common | gather os specific variables +- name: Containerd-common | gather os specific variables include_vars: "{{ item }}" 
with_first_found: - files: diff --git a/roles/container-engine/containerd/handlers/main.yml b/roles/container-engine/containerd/handlers/main.yml index d2f12658f9b..3c132bdf0d0 100644 --- a/roles/container-engine/containerd/handlers/main.yml +++ b/roles/container-engine/containerd/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: restart containerd +- name: Restart containerd command: /bin/true notify: - Containerd | restart containerd diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 5ec9c28acdc..e3d47d886d6 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -5,33 +5,33 @@ when: - not (allow_unsupported_distribution_setup | default(false)) and (ansible_distribution not in containerd_supported_distributions) -- name: containerd | Remove any package manager controlled containerd package +- name: Containerd | Remove any package manager controlled containerd package package: name: "{{ containerd_package }}" state: absent when: - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) -- name: containerd | Remove containerd repository +- name: Containerd | Remove containerd repository file: path: "{{ yum_repo_dir }}/containerd.repo" state: absent when: - ansible_os_family in ['RedHat'] -- name: containerd | Remove containerd repository +- name: Containerd | Remove containerd repository apt_repository: repo: "{{ item }}" state: absent with_items: "{{ containerd_repo_info.repos }}" when: ansible_pkg_mgr == 'apt' -- name: containerd | Download containerd +- name: Containerd | Download containerd include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.containerd) }}" -- name: containerd | Unpack containerd archive +- name: Containerd | Unpack containerd archive unarchive: src: "{{ downloads.containerd.dest }}" dest: 
"{{ containerd_bin_dir }}" @@ -41,7 +41,7 @@ - --strip-components=1 notify: restart containerd -- name: containerd | Remove orphaned binary +- name: Containerd | Remove orphaned binary file: path: "/usr/bin/{{ item }}" state: absent @@ -56,14 +56,14 @@ - containerd-shim-runc-v2 - ctr -- name: containerd | Generate systemd service for containerd +- name: Containerd | Generate systemd service for containerd template: src: containerd.service.j2 dest: /etc/systemd/system/containerd.service mode: 0644 notify: restart containerd -- name: containerd | Ensure containerd directories exist +- name: Containerd | Ensure containerd directories exist file: dest: "{{ item }}" state: directory @@ -76,7 +76,7 @@ - "{{ containerd_storage_dir }}" - "{{ containerd_state_dir }}" -- name: containerd | Write containerd proxy drop-in +- name: Containerd | Write containerd proxy drop-in template: src: http-proxy.conf.j2 dest: "{{ containerd_systemd_dir }}/http-proxy.conf" @@ -84,17 +84,17 @@ notify: restart containerd when: http_proxy is defined or https_proxy is defined -- name: containerd | Generate default base_runtime_spec +- name: Containerd | Generate default base_runtime_spec register: ctr_oci_spec command: "{{ containerd_bin_dir }}/ctr oci spec" check_mode: false changed_when: false -- name: containerd | Store generated default base_runtime_spec +- name: Containerd | Store generated default base_runtime_spec set_fact: containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}" -- name: containerd | Write base_runtime_specs +- name: Containerd | Write base_runtime_specs copy: content: "{{ item.value }}" dest: "{{ containerd_cfg_dir }}/{{ item.key }}" @@ -103,7 +103,7 @@ with_dict: "{{ containerd_base_runtime_specs | default({}) }}" notify: restart containerd -- name: containerd | Copy containerd config file +- name: Containerd | Copy containerd config file template: src: config.toml.j2 dest: "{{ containerd_cfg_dir }}/config.toml" @@ -112,14 +112,14 @@ notify: 
restart containerd - block: - - name: containerd | Create registry directories + - name: Containerd | Create registry directories file: path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}" state: directory mode: 0755 recurse: true with_dict: "{{ containerd_insecure_registries }}" - - name: containerd | Write hosts.toml file + - name: Containerd | Write hosts.toml file blockinfile: path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml" mode: 0640 @@ -134,10 +134,10 @@ # you can sometimes end up in a state where everything is installed # but containerd was not started / enabled -- name: containerd | Flush handlers +- name: Containerd | Flush handlers meta: flush_handlers -- name: containerd | Ensure containerd is started and enabled +- name: Containerd | Ensure containerd is started and enabled systemd: name: containerd daemon_reload: yes diff --git a/roles/container-engine/containerd/tasks/reset.yml b/roles/container-engine/containerd/tasks/reset.yml index 5c551b6d97c..4bc40da8303 100644 --- a/roles/container-engine/containerd/tasks/reset.yml +++ b/roles/container-engine/containerd/tasks/reset.yml @@ -1,5 +1,5 @@ --- -- name: containerd | Remove containerd repository for RedHat os family +- name: Containerd | Remove containerd repository for RedHat os family file: path: "{{ yum_repo_dir }}/containerd.repo" state: absent @@ -8,7 +8,7 @@ tags: - reset_containerd -- name: containerd | Remove containerd repository for Debian os family +- name: Containerd | Remove containerd repository for Debian os family apt_repository: repo: "{{ item }}" state: absent @@ -17,7 +17,7 @@ tags: - reset_containerd -- name: containerd | Stop containerd service +- name: Containerd | Stop containerd service service: name: containerd daemon_reload: true @@ -27,7 +27,7 @@ tags: - reset_containerd -- name: containerd | Remove configuration files +- name: Containerd | Remove configuration files file: path: "{{ item }}" state: absent diff --git 
a/roles/container-engine/cri-dockerd/handlers/main.yml b/roles/container-engine/cri-dockerd/handlers/main.yml index 9d9d8c64361..5f926b635d8 100644 --- a/roles/container-engine/cri-dockerd/handlers/main.yml +++ b/roles/container-engine/cri-dockerd/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: restart and enable cri-dockerd +- name: Restart and enable cri-dockerd command: /bin/true notify: - cri-dockerd | reload systemd @@ -8,28 +8,28 @@ - cri-dockerd | reload cri-dockerd.service - cri-dockerd | enable cri-dockerd service -- name: cri-dockerd | reload systemd +- name: Cri-dockerd | reload systemd systemd: name: cri-dockerd daemon_reload: true masked: no -- name: cri-dockerd | restart docker.service +- name: Cri-dockerd | restart docker.service service: name: docker.service state: restarted -- name: cri-dockerd | reload cri-dockerd.socket +- name: Cri-dockerd | reload cri-dockerd.socket service: name: cri-dockerd.socket state: restarted -- name: cri-dockerd | reload cri-dockerd.service +- name: Cri-dockerd | reload cri-dockerd.service service: name: cri-dockerd.service state: restarted -- name: cri-dockerd | enable cri-dockerd service +- name: Cri-dockerd | enable cri-dockerd service service: name: cri-dockerd.service enabled: yes diff --git a/roles/container-engine/cri-dockerd/tasks/main.yml b/roles/container-engine/cri-dockerd/tasks/main.yml index 9ce3ec6efef..70da654df81 100644 --- a/roles/container-engine/cri-dockerd/tasks/main.yml +++ b/roles/container-engine/cri-dockerd/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: runc | Download cri-dockerd binary +- name: Runc | Download cri-dockerd binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.cri_dockerd) }}" diff --git a/roles/container-engine/cri-o/handlers/main.yml b/roles/container-engine/cri-o/handlers/main.yml index 8bc936b457f..763f4b558b6 100644 --- a/roles/container-engine/cri-o/handlers/main.yml +++ 
b/roles/container-engine/cri-o/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: restart crio +- name: Restart crio command: /bin/true notify: - CRI-O | reload systemd diff --git a/roles/container-engine/crictl/tasks/crictl.yml b/roles/container-engine/crictl/tasks/crictl.yml index 36e09e4a8a3..cffa0505600 100644 --- a/roles/container-engine/crictl/tasks/crictl.yml +++ b/roles/container-engine/crictl/tasks/crictl.yml @@ -1,5 +1,5 @@ --- -- name: crictl | Download crictl +- name: Crictl | Download crictl include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.crictl) }}" diff --git a/roles/container-engine/crictl/tasks/main.yml b/roles/container-engine/crictl/tasks/main.yml index a0711269646..9337016c17e 100644 --- a/roles/container-engine/crictl/tasks/main.yml +++ b/roles/container-engine/crictl/tasks/main.yml @@ -1,3 +1,3 @@ --- -- name: install crictl +- name: Install crictl include_tasks: crictl.yml diff --git a/roles/container-engine/crun/tasks/main.yml b/roles/container-engine/crun/tasks/main.yml index a1d29a78f77..b49db033292 100644 --- a/roles/container-engine/crun/tasks/main.yml +++ b/roles/container-engine/crun/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: crun | Download crun binary +- name: Crun | Download crun binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.crun) }}" diff --git a/roles/container-engine/docker-storage/tasks/main.yml b/roles/container-engine/docker-storage/tasks/main.yml index 462938191fe..ec129753ddc 100644 --- a/roles/container-engine/docker-storage/tasks/main.yml +++ b/roles/container-engine/docker-storage/tasks/main.yml @@ -1,18 +1,18 @@ --- -- name: docker-storage-setup | install git and make +- name: Docker-storage-setup | install git and make with_items: [git, make] package: pkg: "{{ item }}" state: present -- name: docker-storage-setup | docker-storage-setup sysconfig template +- name: 
Docker-storage-setup | docker-storage-setup sysconfig template template: src: docker-storage-setup.j2 dest: /etc/sysconfig/docker-storage-setup mode: 0644 -- name: docker-storage-override-directory | docker service storage-setup override dir +- name: Docker-storage-override-directory | docker service storage-setup override dir file: dest: /etc/systemd/system/docker.service.d mode: 0755 @@ -20,7 +20,7 @@ group: root state: directory -- name: docker-storage-override | docker service storage-setup override file +- name: Docker-storage-override | docker service storage-setup override file copy: dest: /etc/systemd/system/docker.service.d/override.conf content: |- @@ -33,12 +33,12 @@ mode: 0644 # https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository -- name: docker-storage-setup | install lvm2 +- name: Docker-storage-setup | install lvm2 package: name: lvm2 state: present -- name: docker-storage-setup | install and run container-storage-setup +- name: Docker-storage-setup | install and run container-storage-setup become: yes script: | install_container_storage_setup.sh \ diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml index 8c26de27308..14a7b3973d3 100644 --- a/roles/container-engine/docker/handlers/main.yml +++ b/roles/container-engine/docker/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: restart docker +- name: Restart docker command: /bin/true notify: - Docker | reload systemd diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index ae7b574d493..32e90549e95 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: check if fedora coreos +- name: Check if fedora coreos stat: path: /run/ostree-booted get_attributes: no @@ -7,18 +7,18 @@ get_mime: no register: ostree -- name: set is_ostree +- name: Set is_ostree set_fact: is_ostree: 
"{{ ostree.stat.exists }}" -- name: set docker_version for openEuler +- name: Set docker_version for openEuler set_fact: docker_version: '19.03' when: ansible_distribution == "openEuler" tags: - facts -- name: gather os specific variables +- name: Gather os specific variables include_vars: "{{ item }}" with_first_found: - files: @@ -51,7 +51,7 @@ - import_tasks: pre-upgrade.yml -- name: ensure docker-ce repository public key is installed +- name: Ensure docker-ce repository public key is installed apt_key: id: "{{ item }}" url: "{{ docker_repo_key_info.url }}" @@ -64,7 +64,7 @@ environment: "{{ proxy_env }}" when: ansible_pkg_mgr == 'apt' -- name: ensure docker-ce repository is enabled +- name: Ensure docker-ce repository is enabled apt_repository: repo: "{{ item }}" state: present @@ -99,7 +99,7 @@ - docker-ce - docker-ce-cli -- name: ensure docker packages are installed +- name: Ensure docker packages are installed package: name: "{{ docker_package_info.pkgs }}" state: "{{ docker_package_info.state | default('present') }}" @@ -135,9 +135,9 @@ - docker-ce - docker-ce-cli -- name: ensure docker started, remove our config if docker start failed and try again +- name: Ensure docker started, remove our config if docker start failed and try again block: - - name: ensure service is started if docker packages are already present + - name: Ensure service is started if docker packages are already present service: name: docker state: started @@ -145,7 +145,7 @@ rescue: - debug: # noqa unnamed-task msg: "Docker start failed. 
Try to remove our config" - - name: remove kubespray generated config + - name: Remove kubespray generated config file: path: "{{ item }}" state: absent @@ -156,7 +156,7 @@ - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf notify: restart docker -- name: flush handlers so we can wait for docker to come up +- name: Flush handlers so we can wait for docker to come up meta: flush_handlers # Install each plugin using a looped include to make error handling in the included task simpler. @@ -168,7 +168,7 @@ - name: Set docker systemd config import_tasks: systemd.yml -- name: ensure docker service is started and enabled +- name: Ensure docker service is started and enabled service: name: "{{ item }}" enabled: yes diff --git a/roles/container-engine/docker/tasks/reset.yml b/roles/container-engine/docker/tasks/reset.yml index 76d125b3707..a37a853ebe3 100644 --- a/roles/container-engine/docker/tasks/reset.yml +++ b/roles/container-engine/docker/tasks/reset.yml @@ -21,7 +21,7 @@ ignore_errors: true # noqa ignore-errors when: docker_packages_list|length>0 -- name: reset | remove all containers +- name: Reset | remove all containers shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" args: executable: /bin/bash diff --git a/roles/container-engine/docker/tasks/set_facts_dns.yml b/roles/container-engine/docker/tasks/set_facts_dns.yml index d800373002f..23b70ac0eed 100644 --- a/roles/container-engine/docker/tasks/set_facts_dns.yml +++ b/roles/container-engine/docker/tasks/set_facts_dns.yml @@ -1,23 +1,23 @@ --- -- name: set dns server for docker +- name: Set dns server for docker set_fact: docker_dns_servers: "{{ dns_servers }}" -- name: show docker_dns_servers +- name: Show docker_dns_servers debug: msg: "{{ docker_dns_servers }}" -- name: add upstream dns servers +- name: Add upstream dns servers set_fact: docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}" when: dns_mode in ['coredns', 
'coredns_dual'] -- name: add global searchdomains +- name: Add global searchdomains set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}" -- name: check system nameservers +- name: Check system nameservers shell: set -o pipefail && grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/' args: executable: /bin/bash @@ -25,7 +25,7 @@ register: system_nameservers check_mode: no -- name: check system search domains +- name: Check system search domains # noqa 306 - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false # Therefore -o pipefail is not applicable in this specific instance shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/' @@ -35,32 +35,32 @@ register: system_search_domains check_mode: no -- name: add system nameservers to docker options +- name: Add system nameservers to docker options set_fact: docker_dns_servers: "{{ docker_dns_servers | union(system_nameservers.stdout_lines) | unique }}" when: system_nameservers.stdout -- name: add system search domains to docker options +- name: Add system search domains to docker options set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split()|default([])) | unique }}" when: system_search_domains.stdout -- name: check number of nameservers +- name: Check number of nameservers fail: msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in docker.yml and we will only use the first 3." 
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool -- name: rtrim number of nameservers to 3 +- name: Rtrim number of nameservers to 3 set_fact: docker_dns_servers: "{{ docker_dns_servers[0:3] }}" when: docker_dns_servers|length > 3 and not docker_dns_servers_strict|bool -- name: check number of search domains +- name: Check number of search domains fail: msg: "Too many search domains" when: docker_dns_search_domains|length > 6 -- name: check length of search domains +- name: Check length of search domains fail: msg: "Search domains exceeded limit of 256 characters" when: docker_dns_search_domains|join(' ')|length > 256 diff --git a/roles/container-engine/docker/tasks/systemd.yml b/roles/container-engine/docker/tasks/systemd.yml index 0c040fee78f..a5f8c1d8236 100644 --- a/roles/container-engine/docker/tasks/systemd.yml +++ b/roles/container-engine/docker/tasks/systemd.yml @@ -13,7 +13,7 @@ notify: restart docker when: http_proxy is defined or https_proxy is defined -- name: get systemd version +- name: Get systemd version # noqa 303 - systemctl is called intentionally here shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2 args: diff --git a/roles/container-engine/gvisor/tasks/main.yml b/roles/container-engine/gvisor/tasks/main.yml index fa5bd725e4e..9b0fb3976ad 100644 --- a/roles/container-engine/gvisor/tasks/main.yml +++ b/roles/container-engine/gvisor/tasks/main.yml @@ -1,15 +1,15 @@ --- -- name: gVisor | Download runsc binary +- name: GVisor | Download runsc binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.gvisor_runsc) }}" -- name: gVisor | Download containerd-shim-runsc-v1 binary +- name: GVisor | Download containerd-shim-runsc-v1 binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.gvisor_containerd_shim) }}" -- name: gVisor | Copy binaries +- name: GVisor | Copy 
binaries copy: src: "{{ local_release_dir }}/gvisor-{{ item }}" dest: "{{ bin_dir }}/{{ item }}" diff --git a/roles/container-engine/kata-containers/tasks/main.yml b/roles/container-engine/kata-containers/tasks/main.yml index 54bd25d0fed..71f86765a0e 100644 --- a/roles/container-engine/kata-containers/tasks/main.yml +++ b/roles/container-engine/kata-containers/tasks/main.yml @@ -1,23 +1,23 @@ --- -- name: kata-containers | Download kata binary +- name: Kata-containers | Download kata binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.kata_containers) }}" -- name: kata-containers | Copy kata-containers binary +- name: Kata-containers | Copy kata-containers binary unarchive: src: "{{ local_release_dir }}/kata-static-{{ kata_containers_version }}-{{ image_arch }}.tar.xz" dest: "/" mode: 0755 remote_src: yes -- name: kata-containers | Create config directory +- name: Kata-containers | Create config directory file: path: "{{ kata_containers_config_dir }}" state: directory mode: 0755 -- name: kata-containers | Set configuration +- name: Kata-containers | Set configuration template: src: "{{ item }}.j2" dest: "{{ kata_containers_config_dir }}/{{ item }}" @@ -25,7 +25,7 @@ with_items: - configuration-qemu.toml -- name: kata-containers | Set containerd bin +- name: Kata-containers | Set containerd bin vars: shim: "{{ item }}" template: @@ -35,7 +35,7 @@ with_items: - qemu -- name: kata-containers | Load vhost kernel modules +- name: Kata-containers | Load vhost kernel modules modprobe: state: present name: "{{ item }}" @@ -43,7 +43,7 @@ - vhost_vsock - vhost_net -- name: kata-containers | Persist vhost kernel modules +- name: Kata-containers | Persist vhost kernel modules copy: dest: /etc/modules-load.d/kubespray-kata-containers.conf mode: 0644 diff --git a/roles/container-engine/nerdctl/tasks/main.yml b/roles/container-engine/nerdctl/tasks/main.yml index ad088391f03..e4e4ebd1584 100644 --- 
a/roles/container-engine/nerdctl/tasks/main.yml +++ b/roles/container-engine/nerdctl/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: nerdctl | Download nerdctl +- name: Nerdctl | Download nerdctl include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.nerdctl) }}" -- name: nerdctl | Copy nerdctl binary from download dir +- name: Nerdctl | Copy nerdctl binary from download dir copy: src: "{{ local_release_dir }}/nerdctl" dest: "{{ bin_dir }}/nerdctl" @@ -17,7 +17,7 @@ - Get nerdctl completion - Install nerdctl completion -- name: nerdctl | Create configuration dir +- name: Nerdctl | Create configuration dir file: path: /etc/nerdctl state: directory @@ -26,7 +26,7 @@ group: root become: true -- name: nerdctl | Install nerdctl configuration +- name: Nerdctl | Install nerdctl configuration template: src: nerdctl.toml.j2 dest: /etc/nerdctl/nerdctl.toml diff --git a/roles/container-engine/runc/tasks/main.yml b/roles/container-engine/runc/tasks/main.yml index 7a8e336c2f7..542a447d5a5 100644 --- a/roles/container-engine/runc/tasks/main.yml +++ b/roles/container-engine/runc/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: runc | check if fedora coreos +- name: Runc | check if fedora coreos stat: path: /run/ostree-booted get_attributes: no @@ -7,18 +7,18 @@ get_mime: no register: ostree -- name: runc | set is_ostree +- name: Runc | set is_ostree set_fact: is_ostree: "{{ ostree.stat.exists }}" -- name: runc | Uninstall runc package managed by package manager +- name: Runc | Uninstall runc package managed by package manager package: name: "{{ runc_package_name }}" state: absent when: - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) -- name: runc | Download runc binary +- name: Runc | Download runc binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.runc) }}" @@ -30,7 +30,7 @@ 
mode: 0755 remote_src: true -- name: runc | Remove orphaned binary +- name: Runc | Remove orphaned binary file: path: /usr/bin/runc state: absent diff --git a/roles/container-engine/skopeo/tasks/main.yml b/roles/container-engine/skopeo/tasks/main.yml index 033ae629fa9..cef0424cd12 100644 --- a/roles/container-engine/skopeo/tasks/main.yml +++ b/roles/container-engine/skopeo/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: skopeo | check if fedora coreos +- name: Skopeo | check if fedora coreos stat: path: /run/ostree-booted get_attributes: no @@ -7,11 +7,11 @@ get_mime: no register: ostree -- name: skopeo | set is_ostree +- name: Skopeo | set is_ostree set_fact: is_ostree: "{{ ostree.stat.exists }}" -- name: skopeo | Uninstall skopeo package managed by package manager +- name: Skopeo | Uninstall skopeo package managed by package manager package: name: skopeo state: absent @@ -19,7 +19,7 @@ - not (is_ostree or (ansible_distribution == "Flatcar Container Linux by Kinvolk") or (ansible_distribution == "Flatcar")) ignore_errors: true # noqa ignore-errors -- name: skopeo | Download skopeo binary +- name: Skopeo | Download skopeo binary include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.skopeo) }}" diff --git a/roles/container-engine/validate-container-engine/tasks/main.yml b/roles/container-engine/validate-container-engine/tasks/main.yml index fdd60e0e260..2221eb7e026 100644 --- a/roles/container-engine/validate-container-engine/tasks/main.yml +++ b/roles/container-engine/validate-container-engine/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: validate-container-engine | check if fedora coreos +- name: Validate-container-engine | check if fedora coreos stat: path: /run/ostree-booted get_attributes: no @@ -9,7 +9,7 @@ tags: - facts -- name: validate-container-engine | set is_ostree +- name: Validate-container-engine | set is_ostree set_fact: is_ostree: "{{ ostree.stat.exists }}" tags: diff --git 
a/roles/container-engine/youki/tasks/main.yml b/roles/container-engine/youki/tasks/main.yml index 1095c3d2e35..d617963df3c 100644 --- a/roles/container-engine/youki/tasks/main.yml +++ b/roles/container-engine/youki/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: youki | Download youki +- name: Youki | Download youki include_tasks: "../../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.youki) }}" -- name: youki | Copy youki binary from download dir +- name: Youki | Copy youki binary from download dir copy: src: "{{ local_release_dir }}/youki_v{{ youki_version | regex_replace('\\.', '_') }}_linux/youki-v{{ youki_version }}/youki" dest: "{{ youki_bin_dir }}/youki" diff --git a/roles/download/tasks/check_pull_required.yml b/roles/download/tasks/check_pull_required.yml index c2f9ead02ad..c0f8c72f527 100644 --- a/roles/download/tasks/check_pull_required.yml +++ b/roles/download/tasks/check_pull_required.yml @@ -1,20 +1,20 @@ --- # The image_info_command depends on the Container Runtime and will output something like the following: # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc... 
-- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell +- name: Check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell shell: "{{ image_info_command }}" register: docker_images changed_when: false check_mode: no when: not download_always_pull -- name: check_pull_required | Set pull_required if the desired image is not yet loaded +- name: Check_pull_required | Set pull_required if the desired image is not yet loaded set_fact: pull_required: >- {%- if image_reponame | regex_replace('^docker\.io/(library/)?','') in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} when: not download_always_pull -- name: check_pull_required | Check that the local digest sha256 corresponds to the given image tag +- name: Check_pull_required | Check that the local digest sha256 corresponds to the given image tag assert: that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')" when: diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index 39e0e34c281..fccfd7ba515 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -1,6 +1,6 @@ --- - block: - - name: set default values for flag variables + - name: Set default values for flag variables set_fact: image_is_cached: false image_changed: false @@ -8,12 +8,12 @@ tags: - facts - - name: download_container | Set a few facts + - name: Download_container | Set a few facts import_tasks: set_container_facts.yml tags: - facts - - name: download_container | Prepare container download + - name: Download_container | Prepare container download include_tasks: check_pull_required.yml when: - not download_always_pull @@ -21,7 +21,7 @@ - debug: # noqa unnamed-task msg: "Pull {{ image_reponame }} required is: 
{{ pull_required }}" - - name: download_container | Determine if image is in cache + - name: Download_container | Determine if image is in cache stat: path: "{{ image_path_cached }}" get_attributes: no @@ -36,7 +36,7 @@ when: - download_force_cache - - name: download_container | Set fact indicating if image is in cache + - name: Download_container | Set fact indicating if image is in cache set_fact: image_is_cached: "{{ cache_image.stat.exists }}" tags: @@ -52,7 +52,7 @@ - download_force_cache - not download_run_once - - name: download_container | Download image if required + - name: Download_container | Download image if required command: "{{ image_pull_command_on_localhost if download_localhost else image_pull_command }} {{ image_reponame }}" delegate_to: "{{ download_delegate if download_run_once else inventory_hostname }}" delegate_facts: yes @@ -67,7 +67,7 @@ - pull_required or download_run_once - not image_is_cached - - name: download_container | Save and compress image + - name: Download_container | Save and compress image shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell delegate_to: "{{ download_delegate }}" delegate_facts: no @@ -79,7 +79,7 @@ - not image_is_cached - download_run_once - - name: download_container | Copy image to ansible host cache + - name: Download_container | Copy image to ansible host cache synchronize: src: "{{ image_path_final }}" dest: "{{ image_path_cached }}" @@ -91,7 +91,7 @@ - not download_localhost - download_delegate == inventory_hostname - - name: download_container | Upload image to node if it is cached + - name: Download_container | Upload image to node if it is cached synchronize: src: "{{ image_path_cached }}" dest: "{{ image_path_final }}" @@ -107,7 +107,7 @@ - pull_required - download_force_cache - - name: download_container | Load image into the local container registry + - name: 
Download_container | Load image into the local container registry shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell register: container_load_status failed_when: container_load_status is failed @@ -115,7 +115,7 @@ - pull_required - download_force_cache - - name: download_container | Remove container image from cache + - name: Download_container | Remove container image from cache file: state: absent path: "{{ image_path_final }}" diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml index 3bddd74480d..fe0dfb57c7a 100644 --- a/roles/download/tasks/download_file.yml +++ b/roles/download/tasks/download_file.yml @@ -1,21 +1,21 @@ --- - block: - - name: prep_download | Set a few facts + - name: Prep_download | Set a few facts set_fact: download_force_cache: "{{ true if download_run_once else download_force_cache }}" - - name: download_file | Starting download of file + - name: Download_file | Starting download of file debug: msg: "{{ download.url }}" run_once: "{{ download_run_once }}" - - name: download_file | Set pathname of cached file + - name: Download_file | Set pathname of cached file set_fact: file_path_cached: "{{ download_cache_dir }}/{{ download.dest | basename }}" tags: - facts - - name: download_file | Create dest directory on node + - name: Download_file | Create dest directory on node file: path: "{{ download.dest | dirname }}" owner: "{{ download.owner | default(omit) }}" @@ -23,7 +23,7 @@ state: directory recurse: yes - - name: download_file | Create local cache directory + - name: Download_file | Create local cache directory file: path: "{{ file_path_cached | dirname }}" state: directory @@ -38,7 +38,7 @@ tags: - localhost - - name: download_file | Create cache directory on download_delegate host + - name: Download_file | Create cache directory on download_delegate host file: path: "{{ file_path_cached | dirname }}" state: directory @@ -52,7 +52,7 @@ # We check a 
number of mirrors that may hold the file and pick a working one at random # This task will avoid logging it's parameters to not leak environment passwords in the log - - name: download_file | Validate mirrors + - name: Download_file | Validate mirrors uri: url: "{{ mirror }}" method: HEAD @@ -75,14 +75,14 @@ ignore_errors: true # Ansible 2.9 requires we convert a generator to a list - - name: download_file | Get the list of working mirrors + - name: Download_file | Get the list of working mirrors set_fact: valid_mirror_urls: "{{ uri_result.results | selectattr('failed', 'eq', False) | map(attribute='mirror') | list }}" delegate_to: "{{ download_delegate if download_force_cache else inventory_hostname }}" # This must always be called, to check if the checksum matches. On no-match the file is re-downloaded. # This task will avoid logging it's parameters to not leak environment passwords in the log - - name: download_file | Download item + - name: Download_file | Download item get_url: url: "{{ valid_mirror_urls | random }}" dest: "{{ file_path_cached if download_force_cache else download.dest }}" @@ -103,7 +103,7 @@ environment: "{{ proxy_env }}" no_log: "{{ not (unsafe_show_logs|bool) }}" - - name: download_file | Copy file back to ansible host file cache + - name: Download_file | Copy file back to ansible host file cache synchronize: src: "{{ file_path_cached }}" dest: "{{ file_path_cached }}" @@ -114,7 +114,7 @@ - not download_localhost - download_delegate == inventory_hostname - - name: download_file | Copy file from cache to nodes, if it is available + - name: Download_file | Copy file from cache to nodes, if it is available synchronize: src: "{{ file_path_cached }}" dest: "{{ download.dest }}" @@ -127,7 +127,7 @@ when: - download_force_cache - - name: download_file | Set mode and owner + - name: Download_file | Set mode and owner file: path: "{{ download.dest }}" mode: "{{ download.mode | default(omit) }}" @@ -135,7 +135,7 @@ when: - download_force_cache - - 
name: "download_file | Extract file archives" + - name: "Download_file | Extract file archives" include_tasks: "extract_file.yml" tags: diff --git a/roles/download/tasks/extract_file.yml b/roles/download/tasks/extract_file.yml index 81858dd3a03..219e2d7ac0c 100644 --- a/roles/download/tasks/extract_file.yml +++ b/roles/download/tasks/extract_file.yml @@ -1,5 +1,5 @@ --- -- name: extract_file | Unpacking archive +- name: Extract_file | Unpacking archive unarchive: src: "{{ download.dest }}" dest: "{{ download.dest | dirname }}" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 536c293a7c2..27b165fe8c5 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: download | Prepare working directories and variables +- name: Download | Prepare working directories and variables import_tasks: prep_download.yml when: - not skip_downloads|default(false) @@ -7,7 +7,7 @@ - download - upload -- name: download | Get kubeadm binary and list of required images +- name: Download | Get kubeadm binary and list of required images include_tasks: prep_kubeadm_images.yml when: - not skip_downloads|default(false) @@ -16,7 +16,7 @@ - download - upload -- name: download | Download files / images +- name: Download | Download files / images include_tasks: "{{ include_file }}" loop: "{{ downloads | combine(kubeadm_images) | dict2items }}" vars: diff --git a/roles/download/tasks/prep_download.yml b/roles/download/tasks/prep_download.yml index 9419f24aac9..68dc7d86b74 100644 --- a/roles/download/tasks/prep_download.yml +++ b/roles/download/tasks/prep_download.yml @@ -1,11 +1,11 @@ --- -- name: prep_download | Set a few facts +- name: Prep_download | Set a few facts set_fact: download_force_cache: "{{ true if download_run_once else download_force_cache }}" tags: - facts -- name: prep_download | On localhost, check if passwordless root is possible +- name: Prep_download | On localhost, check if passwordless root is 
possible command: "true" delegate_to: localhost connection: local @@ -20,7 +20,7 @@ - localhost - asserts -- name: prep_download | On localhost, check if user has access to the container runtime without using sudo +- name: Prep_download | On localhost, check if user has access to the container runtime without using sudo shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell delegate_to: localhost connection: local @@ -35,7 +35,7 @@ - localhost - asserts -- name: prep_download | Parse the outputs of the previous commands +- name: Prep_download | Parse the outputs of the previous commands set_fact: user_in_docker_group: "{{ not test_docker.failed }}" user_can_become_root: "{{ not test_become.failed }}" @@ -45,7 +45,7 @@ - localhost - asserts -- name: prep_download | Check that local user is in group or can become root +- name: Prep_download | Check that local user is in group or can become root assert: that: "user_in_docker_group or user_can_become_root" msg: >- @@ -56,7 +56,7 @@ - localhost - asserts -- name: prep_download | Register docker images info +- name: Prep_download | Register docker images info shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell no_log: "{{ not (unsafe_show_logs|bool) }}" register: docker_images @@ -65,7 +65,7 @@ check_mode: no when: download_container -- name: prep_download | Create staging directory on remote node +- name: Prep_download | Create staging directory on remote node file: path: "{{ local_release_dir }}/images" state: directory @@ -75,7 +75,7 @@ when: - ansible_os_family not in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -- name: prep_download | Create local cache for files and images on control node +- name: Prep_download | Create local cache for files and images on control node file: path: "{{ download_cache_dir }}/images" state: directory diff --git 
a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml index aa21849e08f..abe7da9d136 100644 --- a/roles/download/tasks/prep_kubeadm_images.yml +++ b/roles/download/tasks/prep_kubeadm_images.yml @@ -1,12 +1,12 @@ --- -- name: prep_kubeadm_images | Check kubeadm version matches kubernetes version +- name: Prep_kubeadm_images | Check kubeadm version matches kubernetes version fail: msg: "Kubeadm version {{ kubeadm_version }} do not matches kubernetes {{ kube_version }}" when: - not skip_downloads | default(false) - not kubeadm_version == downloads.kubeadm.version -- name: prep_kubeadm_images | Download kubeadm binary +- name: Prep_kubeadm_images | Download kubeadm binary include_tasks: "download_file.yml" vars: download: "{{ download_defaults | combine(downloads.kubeadm) }}" @@ -14,7 +14,7 @@ - not skip_downloads | default(false) - downloads.kubeadm.enabled -- name: prep_kubeadm_images | Create kubeadm config +- name: Prep_kubeadm_images | Create kubeadm config template: src: "kubeadm-images.yaml.j2" dest: "{{ kube_config_dir }}/kubeadm-images.yaml" @@ -22,20 +22,20 @@ when: - not skip_kubeadm_images|default(false) -- name: prep_kubeadm_images | Copy kubeadm binary from download dir to system path +- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path copy: src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" dest: "{{ bin_dir }}/kubeadm" mode: 0755 remote_src: true -- name: prep_kubeadm_images | Set kubeadm binary permissions +- name: Prep_kubeadm_images | Set kubeadm binary permissions file: path: "{{ bin_dir }}/kubeadm" mode: "0755" state: file -- name: prep_kubeadm_images | Generate list of required images +- name: Prep_kubeadm_images | Generate list of required images shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'" args: executable: /bin/bash @@ -45,7 +45,7 @@ when: - not 
skip_kubeadm_images|default(false) -- name: prep_kubeadm_images | Parse list of images +- name: Prep_kubeadm_images | Parse list of images vars: kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}" set_fact: @@ -63,7 +63,7 @@ when: - not skip_kubeadm_images|default(false) -- name: prep_kubeadm_images | Convert list of images to dict for later use +- name: Prep_kubeadm_images | Convert list of images to dict for later use set_fact: kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}" run_once: true diff --git a/roles/download/tasks/set_container_facts.yml b/roles/download/tasks/set_container_facts.yml index 9d36c248459..5b93f295359 100644 --- a/roles/download/tasks/set_container_facts.yml +++ b/roles/download/tasks/set_container_facts.yml @@ -1,22 +1,22 @@ --- -- name: set_container_facts | Display the name of the image being processed +- name: Set_container_facts | Display the name of the image being processed debug: msg: "{{ download.repo }}" -- name: set_container_facts | Set if containers should be pulled by digest +- name: Set_container_facts | Set if containers should be pulled by digest set_fact: pull_by_digest: "{{ download.sha256 is defined and download.sha256 }}" -- name: set_container_facts | Define by what name to pull the image +- name: Set_container_facts | Define by what name to pull the image set_fact: image_reponame: >- {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%} -- name: set_container_facts | Define file name of image +- name: Set_container_facts | Define file name of image set_fact: image_filename: "{{ image_reponame | regex_replace('/|\0|:', '_') }}.tar" -- name: set_container_facts | Define path of image +- name: Set_container_facts | Define path of image set_fact: image_path_cached: "{{ download_cache_dir }}/images/{{ image_filename }}" image_path_final: "{{ local_release_dir 
}}/images/{{ image_filename }}" diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index ccf8f8f64e1..d1c439be0d2 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: restart etcd +- name: Restart etcd command: /bin/true notify: - Backup etcd data @@ -8,7 +8,7 @@ - wait for etcd up - Cleanup etcd backups -- name: restart etcd-events +- name: Restart etcd-events command: /bin/true notify: - etcd | reload systemd @@ -17,23 +17,23 @@ - import_tasks: backup.yml -- name: etcd | reload systemd +- name: Etcd | reload systemd systemd: daemon_reload: true -- name: reload etcd +- name: Reload etcd service: name: etcd state: restarted when: is_etcd_master -- name: reload etcd-events +- name: Reload etcd-events service: name: etcd-events state: restarted when: is_etcd_master -- name: wait for etcd up +- name: Wait for etcd up uri: url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs: no @@ -46,7 +46,7 @@ - import_tasks: backup_cleanup.yml -- name: wait for etcd-events up +- name: Wait for etcd-events up uri: url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health" validate_certs: no @@ -57,6 +57,6 @@ retries: 60 delay: 1 -- name: set etcd_secret_changed +- name: Set etcd_secret_changed set_fact: etcd_secret_changed: true diff --git a/roles/etcd/tasks/install_host.yml b/roles/etcd/tasks/install_host.yml index 14a75b48b85..5db79cddae6 100644 --- a/roles/etcd/tasks/install_host.yml +++ b/roles/etcd/tasks/install_host.yml @@ -20,7 +20,7 @@ - etcd_events_cluster_setup - etcd_version.lstrip('v') not in etcd_current_host_version.stdout|default('') -- name: install | Download etcd and etcdctl +- name: Install | Download etcd and etcdctl include_tasks: "../../download/tasks/download_file.yml" vars: download: "{{ download_defaults | combine(downloads.etcd) }}" @@ -29,7 +29,7 @@ - never - etcd -- name: install | 
Copy etcd and etcdctl binary from download dir +- name: Install | Copy etcd and etcdctl binary from download dir copy: src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-{{ host_architecture }}/{{ item }}" dest: "{{ bin_dir }}/{{ item }}" diff --git a/roles/kubernetes-apps/argocd/tasks/main.yml b/roles/kubernetes-apps/argocd/tasks/main.yml index 709067b7f1d..e1edfcdbeb7 100644 --- a/roles/kubernetes-apps/argocd/tasks/main.yml +++ b/roles/kubernetes-apps/argocd/tasks/main.yml @@ -17,9 +17,9 @@ - name: Kubernetes Apps | Set ArgoCD template list set_fact: argocd_templates: - - name: namespace + - name: Namespace file: argocd-namespace.yml - - name: install + - name: Install file: argocd-install.yml namespace: "{{ argocd_namespace }}" url: "https://raw.githubusercontent.com/argoproj/argo-cd/{{ argocd_version }}/manifests/install.yaml" diff --git a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml index bb0161429b1..60bca993139 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml @@ -1,7 +1,7 @@ --- - include_tasks: vsphere-credentials-check.yml -- name: vSphere CSI Driver | Generate CSI cloud-config +- name: VSphere CSI Driver | Generate CSI cloud-config template: src: "{{ item }}.j2" dest: "{{ kube_config_dir }}/{{ item }}" @@ -10,7 +10,7 @@ - vsphere-csi-cloud-config when: inventory_hostname == groups['kube_control_plane'][0] -- name: vSphere CSI Driver | Generate Manifests +- name: VSphere CSI Driver | Generate Manifests template: src: "{{ item }}.j2" dest: "{{ kube_config_dir }}/{{ item }}" @@ -27,7 +27,7 @@ register: vsphere_csi_manifests when: inventory_hostname == groups['kube_control_plane'][0] -- name: vSphere CSI Driver | Apply Manifests +- name: VSphere CSI Driver | Apply Manifests kube: kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/{{ item.item }}" @@ -40,13 +40,13 @@ loop_control: label: 
"{{ item.item }}" -- name: vSphere CSI Driver | Generate a CSI secret manifest +- name: VSphere CSI Driver | Generate a CSI secret manifest command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n {{ vsphere_csi_namespace }} --dry-run --save-config -o yaml" register: vsphere_csi_secret_manifest when: inventory_hostname == groups['kube_control_plane'][0] no_log: "{{ not (unsafe_show_logs|bool) }}" -- name: vSphere CSI Driver | Apply a CSI secret manifest +- name: VSphere CSI Driver | Apply a CSI secret manifest command: cmd: "{{ kubectl }} apply -f -" stdin: "{{ vsphere_csi_secret_manifest.stdout }}" diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml index 25f9a713265..9e8330ea9f4 100644 --- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: kube-router | Start Resources +- name: Kube-router | Start Resources kube: name: "kube-router" kubectl: "{{ bin_dir }}/kubectl" @@ -11,7 +11,7 @@ delegate_to: "{{ groups['kube_control_plane'] | first }}" run_once: true -- name: kube-router | Wait for kube-router pods to be ready +- name: Kube-router | Wait for kube-router pods to be ready command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors register: pods_not_ready until: pods_not_ready.stdout.find("kube-router")==-1 diff --git a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml index 8663e8a247f..e6da2920a50 100644 --- a/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml +++ b/roles/kubernetes-apps/snapshots/snapshot-controller/tasks/main.yml @@ -1,5 +1,5 @@ --- 
-- name: check if snapshot namespace exists +- name: Check if snapshot namespace exists register: snapshot_namespace_exists kube: kubectl: "{{ bin_dir }}/kubectl" diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index cb9e81e79fb..690bcaca6ca 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -98,7 +98,7 @@ run_once: yes when: kubectl_localhost -- name: create helper script kubectl.sh on ansible host +- name: Create helper script kubectl.sh on ansible host copy: content: | #!/bin/bash diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml index a4869fec8bf..4d7fac6940d 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-secondary.yml @@ -46,7 +46,7 @@ timeout: 180 -- name: check already run +- name: Check already run debug: msg: "{{ kubeadm_already_run.stat.exists }}" diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 097fb0f4403..bc298fccdb1 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -10,7 +10,7 @@ - kube_oidc_auth - kube_oidc_ca_cert is defined -- name: kubeadm | Check if kubeadm has already run +- name: Kubeadm | Check if kubeadm has already run stat: path: "/var/lib/kubelet/config.yaml" get_attributes: no @@ -18,12 +18,12 @@ get_mime: no register: kubeadm_already_run -- name: kubeadm | Backup kubeadm certs / kubeconfig +- name: Kubeadm | Backup kubeadm certs / kubeconfig import_tasks: kubeadm-backup.yml when: - kubeadm_already_run.stat.exists -- name: kubeadm | aggregate all SANs +- name: Kubeadm | aggregate all SANs set_fact: apiserver_sans: "{{ (sans_base + groups['kube_control_plane'] + sans_lb + sans_lb_ip + sans_supp + sans_access_ip + sans_ip + 
sans_address + sans_override + sans_hostname + sans_fqdn + sans_kube_vip_address) | unique }}" vars: @@ -69,7 +69,7 @@ when: kubernetes_audit_webhook|default(false) # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint. -- name: set kubeadm_config_api_fqdn define +- name: Set kubeadm_config_api_fqdn define set_fact: kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}" when: loadbalancer_apiserver is defined @@ -78,27 +78,27 @@ set_fact: kubeadmConfig_api_version: v1beta3 -- name: kubeadm | Create kubeadm config +- name: Kubeadm | Create kubeadm config template: src: "kubeadm-config.{{ kubeadmConfig_api_version }}.yaml.j2" dest: "{{ kube_config_dir }}/kubeadm-config.yaml" mode: 0640 -- name: kubeadm | Create directory to store admission control configurations +- name: Kubeadm | Create directory to store admission control configurations file: path: "{{ kube_config_dir }}/admission-controls" state: directory mode: 0640 when: kube_apiserver_admission_control_config_file -- name: kubeadm | Push admission control config file +- name: Kubeadm | Push admission control config file template: src: "admission-controls.yaml.j2" dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml" mode: 0640 when: kube_apiserver_admission_control_config_file -- name: kubeadm | Push admission control config files +- name: Kubeadm | Push admission control config files template: src: "{{ item|lower }}.yaml.j2" dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml" @@ -108,15 +108,15 @@ - item in kube_apiserver_admission_plugins_needs_configuration loop: "{{ kube_apiserver_enable_admission_plugins }}" -- name: kubeadm | Check apiserver.crt SANs +- name: Kubeadm | Check apiserver.crt SANs block: - - name: kubeadm | Check apiserver.crt SAN IPs + - name: Kubeadm | Check apiserver.crt SAN IPs command: cmd: "openssl x509 -noout -in {{ kube_cert_dir 
}}/apiserver.crt -checkip {{ item }}" loop: "{{ apiserver_ips }}" register: apiserver_sans_ip_check changed_when: apiserver_sans_ip_check.stdout is not search('does match certificate') - - name: kubeadm | Check apiserver.crt SAN hosts + - name: Kubeadm | Check apiserver.crt SAN hosts command: cmd: "openssl x509 -noout -in {{ kube_cert_dir }}/apiserver.crt -checkhost {{ item }}" loop: "{{ apiserver_hosts }}" @@ -129,7 +129,7 @@ - kubeadm_already_run.stat.exists - not kube_external_ca_mode -- name: kubeadm | regenerate apiserver cert 1/2 +- name: Kubeadm | regenerate apiserver cert 1/2 file: state: absent path: "{{ kube_cert_dir }}/{{ item }}" @@ -141,7 +141,7 @@ - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed - not kube_external_ca_mode -- name: kubeadm | regenerate apiserver cert 2/2 +- name: Kubeadm | regenerate apiserver cert 2/2 command: >- {{ bin_dir }}/kubeadm init phase certs apiserver @@ -151,14 +151,14 @@ - apiserver_sans_ip_check.changed or apiserver_sans_host_check.changed - not kube_external_ca_mode -- name: kubeadm | Create directory to store kubeadm patches +- name: Kubeadm | Create directory to store kubeadm patches file: path: "{{ kubeadm_patches.dest_dir }}" state: directory mode: 0640 when: kubeadm_patches is defined and kubeadm_patches.enabled -- name: kubeadm | Copy kubeadm patches from inventory files +- name: Kubeadm | Copy kubeadm patches from inventory files copy: src: "{{ kubeadm_patches.source_dir }}/" dest: "{{ kubeadm_patches.dest_dir }}" @@ -166,7 +166,7 @@ mode: 0644 when: kubeadm_patches is defined and kubeadm_patches.enabled -- name: kubeadm | Initialize first master +- name: Kubeadm | Initialize first master command: >- timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }} {{ bin_dir }}/kubeadm init @@ -184,7 +184,7 @@ PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" notify: Master | restart kubelet -- name: set kubeadm certificate key +- name: Set kubeadm certificate key set_fact: 
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}" with_items: "{{ hostvars[groups['kube_control_plane'][0]]['kubeadm_init'].stdout_lines | default([]) }}" @@ -229,17 +229,17 @@ - podsecuritypolicy_enabled - inventory_hostname == first_kube_control_plane -- name: kubeadm | Join other masters +- name: Kubeadm | Join other masters include_tasks: kubeadm-secondary.yml -- name: kubeadm | upgrade kubernetes cluster +- name: Kubeadm | upgrade kubernetes cluster include_tasks: kubeadm-upgrade.yml when: - upgrade_cluster_setup - kubeadm_already_run.stat.exists # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file. -- name: kubeadm | Remove taint for master with node role +- name: Kubeadm | Remove taint for master with node role command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}" delegate_to: "{{ first_kube_control_plane }}" with_items: diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml index 4a0043ef29d..12ab0b93469 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -1,5 +1,5 @@ --- -- name: kubeadm | Check api is up +- name: Kubeadm | Check api is up uri: url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz" validate_certs: false @@ -9,7 +9,7 @@ delay: 5 until: _result.status == 200 -- name: kubeadm | Upgrade first master +- name: Kubeadm | Upgrade first master command: >- timeout -k 600s 600s {{ bin_dir }}/kubeadm @@ -31,7 +31,7 @@ PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" notify: Master | restart kubelet -- name: kubeadm | Upgrade other masters +- name: Kubeadm | Upgrade other masters command: >- timeout -k 600s 600s {{ bin_dir }}/kubeadm @@ -53,7 +53,7 @@ PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" 
notify: Master | restart kubelet -- name: kubeadm | clean kubectl cache to refresh api types +- name: Kubeadm | clean kubectl cache to refresh api types file: path: "{{ item }}" state: absent @@ -62,7 +62,7 @@ - /root/.kube/http-cache # FIXME: https://github.com/kubernetes/kubeadm/issues/1318 -- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode +- name: Kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode command: >- {{ kubectl }} -n kube-system diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index a3cc8620f61..ed7d4c8160a 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -64,14 +64,14 @@ mode: 0640 when: not is_kube_master -- name: kubeadm | Create directory to store kubeadm patches +- name: Kubeadm | Create directory to store kubeadm patches file: path: "{{ kubeadm_patches.dest_dir }}" state: directory mode: 0640 when: kubeadm_patches is defined and kubeadm_patches.enabled -- name: kubeadm | Copy kubeadm patches from inventory files +- name: Kubeadm | Copy kubeadm patches from inventory files copy: src: "{{ kubeadm_patches.source_dir }}/" dest: "{{ kubeadm_patches.dest_dir }}" diff --git a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml index 62337fc2967..6b065f80415 100644 --- a/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml +++ b/roles/kubernetes/node/tasks/cloud-credentials/azure-credential-check.yml @@ -1,82 +1,82 @@ --- -- name: check azure_tenant_id value +- name: Check azure_tenant_id value fail: msg: "azure_tenant_id is missing" when: azure_tenant_id is not defined or not azure_tenant_id -- name: check azure_subscription_id value +- name: Check azure_subscription_id value fail: msg: "azure_subscription_id is missing" when: azure_subscription_id is not defined or not 
azure_subscription_id -- name: check azure_aad_client_id value +- name: Check azure_aad_client_id value fail: msg: "azure_aad_client_id is missing" when: azure_aad_client_id is not defined or not azure_aad_client_id -- name: check azure_aad_client_secret value +- name: Check azure_aad_client_secret value fail: msg: "azure_aad_client_secret is missing" when: azure_aad_client_secret is not defined or not azure_aad_client_secret -- name: check azure_resource_group value +- name: Check azure_resource_group value fail: msg: "azure_resource_group is missing" when: azure_resource_group is not defined or not azure_resource_group -- name: check azure_location value +- name: Check azure_location value fail: msg: "azure_location is missing" when: azure_location is not defined or not azure_location -- name: check azure_subnet_name value +- name: Check azure_subnet_name value fail: msg: "azure_subnet_name is missing" when: azure_subnet_name is not defined or not azure_subnet_name -- name: check azure_security_group_name value +- name: Check azure_security_group_name value fail: msg: "azure_security_group_name is missing" when: azure_security_group_name is not defined or not azure_security_group_name -- name: check azure_vnet_name value +- name: Check azure_vnet_name value fail: msg: "azure_vnet_name is missing" when: azure_vnet_name is not defined or not azure_vnet_name -- name: check azure_vnet_resource_group value +- name: Check azure_vnet_resource_group value fail: msg: "azure_vnet_resource_group is missing" when: azure_vnet_resource_group is not defined or not azure_vnet_resource_group -- name: check azure_route_table_name value +- name: Check azure_route_table_name value fail: msg: "azure_route_table_name is missing" when: azure_route_table_name is not defined or not azure_route_table_name -- name: check azure_loadbalancer_sku value +- name: Check azure_loadbalancer_sku value fail: msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. 
Supported values are 'basic', 'standard'" when: azure_loadbalancer_sku not in ["basic", "standard"] -- name: "check azure_exclude_master_from_standard_lb is a bool" +- name: "Check azure_exclude_master_from_standard_lb is a bool" assert: that: azure_exclude_master_from_standard_lb |type_debug == 'bool' -- name: "check azure_disable_outbound_snat is a bool" +- name: "Check azure_disable_outbound_snat is a bool" assert: that: azure_disable_outbound_snat |type_debug == 'bool' -- name: "check azure_use_instance_metadata is a bool" +- name: "Check azure_use_instance_metadata is a bool" assert: that: azure_use_instance_metadata |type_debug == 'bool' -- name: check azure_vmtype value +- name: Check azure_vmtype value fail: msg: "azure_vmtype is missing. Supported values are 'standard' or 'vmss'" when: azure_vmtype is not defined or not azure_vmtype -- name: check azure_cloud value +- name: Check azure_cloud value fail: msg: "azure_cloud has an invalid value '{{ azure_cloud }}'. Supported values are 'AzureChinaCloud', 'AzureGermanCloud', 'AzurePublicCloud', 'AzureUSGovernmentCloud'." 
when: azure_cloud not in ["AzureChinaCloud", "AzureGermanCloud", "AzurePublicCloud", "AzureUSGovernmentCloud"] diff --git a/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml index 6ff17325fa3..7354d43af61 100644 --- a/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml +++ b/roles/kubernetes/node/tasks/cloud-credentials/openstack-credential-check.yml @@ -1,32 +1,32 @@ --- -- name: check openstack_auth_url value +- name: Check openstack_auth_url value fail: msg: "openstack_auth_url is missing" when: openstack_auth_url is not defined or not openstack_auth_url -- name: check openstack_username value +- name: Check openstack_username value fail: msg: "openstack_username is missing" when: openstack_username is not defined or not openstack_username -- name: check openstack_password value +- name: Check openstack_password value fail: msg: "openstack_password is missing" when: openstack_password is not defined or not openstack_password -- name: check openstack_region value +- name: Check openstack_region value fail: msg: "openstack_region is missing" when: openstack_region is not defined or not openstack_region -- name: check openstack_tenant_id value +- name: Check openstack_tenant_id value fail: msg: "one of openstack_tenant_id or openstack_trust_id must be specified" when: - openstack_tenant_id is not defined or not openstack_tenant_id - openstack_trust_id is not defined -- name: check openstack_trust_id value +- name: Check openstack_trust_id value fail: msg: "one of openstack_tenant_id or openstack_trust_id must be specified" when: diff --git a/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml index 873eb71c3b2..1c0fd407232 100644 --- a/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml +++ 
b/roles/kubernetes/node/tasks/cloud-credentials/vsphere-credential-check.yml @@ -1,22 +1,22 @@ --- -- name: check vsphere environment variables +- name: Check vsphere environment variables fail: msg: "{{ item.name }} is missing" when: item.value is not defined or not item.value with_items: - name: vsphere_vcenter_ip value: "{{ vsphere_vcenter_ip }}" - name: vsphere_vcenter_port value: "{{ vsphere_vcenter_port }}" - name: vsphere_user value: "{{ vsphere_user }}" - name: vsphere_password value: "{{ vsphere_password }}" - name: vsphere_datacenter value: "{{ vsphere_datacenter }}" - name: vsphere_datastore value: "{{ vsphere_datastore }}" - name: vsphere_working_dir value: "{{ vsphere_working_dir }}" - name: vsphere_insecure value: "{{ vsphere_insecure }}" diff --git a/roles/kubernetes/node/tasks/facts.yml b/roles/kubernetes/node/tasks/facts.yml index 97d52e8c3ee..24aebd70d0f 100644 --- a/roles/kubernetes/node/tasks/facts.yml +++ b/roles/kubernetes/node/tasks/facts.yml @@ -1,51 +1,51 @@ --- - block: - - name: look up docker cgroup driver + - name: Look up docker cgroup driver shell: "docker info | grep 'Cgroup Driver' | awk -F': ' '{ print $2; }'" register: docker_cgroup_driver_result changed_when: false check_mode: no - - name: set kubelet_cgroup_driver_detected fact for docker + - name: Set kubelet_cgroup_driver_detected fact for docker set_fact: kubelet_cgroup_driver_detected: "{{ docker_cgroup_driver_result.stdout }}" when: container_manager == 'docker' - block: - - name: look up crio cgroup driver + - name: Look up crio cgroup driver shell: "{{ bin_dir }}/crio-status info | grep 'cgroup driver' | awk -F': ' '{ print $2; }'" register: crio_cgroup_driver_result changed_when: false - - name: set kubelet_cgroup_driver_detected fact 
for crio + - name: Set kubelet_cgroup_driver_detected fact for crio set_fact: kubelet_cgroup_driver_detected: "{{ crio_cgroup_driver_result.stdout }}" when: container_manager == 'crio' -- name: set kubelet_cgroup_driver_detected fact for containerd +- name: Set kubelet_cgroup_driver_detected fact for containerd set_fact: kubelet_cgroup_driver_detected: >- {%- if containerd_use_systemd_cgroup -%}systemd{%- else -%}cgroupfs{%- endif -%} when: container_manager == 'containerd' -- name: set kubelet_cgroup_driver +- name: Set kubelet_cgroup_driver set_fact: kubelet_cgroup_driver: "{{ kubelet_cgroup_driver_detected }}" when: kubelet_cgroup_driver is undefined -- name: set kubelet_cgroups options when cgroupfs is used +- name: Set kubelet_cgroups options when cgroupfs is used set_fact: kubelet_runtime_cgroups: "{{ kubelet_runtime_cgroups_cgroupfs }}" kubelet_kubelet_cgroups: "{{ kubelet_kubelet_cgroups_cgroupfs }}" when: kubelet_cgroup_driver == 'cgroupfs' -- name: set kubelet_config_extra_args options when cgroupfs is used +- name: Set kubelet_config_extra_args options when cgroupfs is used vars: set_fact: kubelet_config_extra_args: "{{ kubelet_config_extra_args | combine(kubelet_config_extra_args_cgroupfs) }}" when: kubelet_cgroup_driver == 'cgroupfs' -- name: os specific vars +- name: Os specific vars include_vars: "{{ item }}" with_first_found: - files: diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index cf7a1d84baf..82175819b7a 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -1,5 +1,5 @@ --- -- name: install | Copy kubeadm binary from download dir +- name: Install | Copy kubeadm binary from download dir copy: src: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}" dest: "{{ bin_dir }}/kubeadm" @@ -10,7 +10,7 @@ when: - not inventory_hostname in groups['kube_control_plane'] -- name: install | Copy kubelet binary from download dir +- name: 
Install | Copy kubelet binary from download dir copy: src: "{{ local_release_dir }}/kubelet-{{ kube_version }}-{{ image_arch }}" dest: "{{ bin_dir }}/kubelet" diff --git a/roles/kubernetes/node/tasks/kubelet.yml b/roles/kubernetes/node/tasks/kubelet.yml index c08ef5fb814..4364d6e06b8 100644 --- a/roles/kubernetes/node/tasks/kubelet.yml +++ b/roles/kubernetes/node/tasks/kubelet.yml @@ -39,7 +39,7 @@ - kubelet - kubeadm -- name: flush_handlers and reload-systemd +- name: Flush_handlers and reload-systemd meta: flush_handlers - name: Enable kubelet diff --git a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml index c8e01081707..7e5cfceddca 100644 --- a/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml +++ b/roles/kubernetes/node/tasks/loadbalancer/haproxy.yml @@ -1,17 +1,17 @@ --- -- name: haproxy | Cleanup potentially deployed nginx-proxy +- name: Haproxy | Cleanup potentially deployed nginx-proxy file: path: "{{ kube_manifest_dir }}/nginx-proxy.yml" state: absent -- name: haproxy | Make haproxy directory +- name: Haproxy | Make haproxy directory file: path: "{{ haproxy_config_dir }}" state: directory mode: 0755 owner: root -- name: haproxy | Write haproxy configuration +- name: Haproxy | Write haproxy configuration template: src: "loadbalancer/haproxy.cfg.j2" dest: "{{ haproxy_config_dir }}/haproxy.cfg" @@ -19,7 +19,7 @@ mode: 0755 backup: yes -- name: haproxy | Get checksum from config +- name: Haproxy | Get checksum from config stat: path: "{{ haproxy_config_dir }}/haproxy.cfg" get_attributes: no @@ -27,7 +27,7 @@ get_mime: no register: haproxy_stat -- name: haproxy | Write static pod +- name: Haproxy | Write static pod template: src: manifests/haproxy.manifest.j2 dest: "{{ kube_manifest_dir }}/haproxy.yml" diff --git a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml index e12bd9bfce7..f7b04a624bd 100644 --- 
a/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml +++ b/roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml @@ -1,12 +1,12 @@ --- -- name: kube-vip | Check cluster settings for kube-vip +- name: Kube-vip | Check cluster settings for kube-vip fail: msg: "kube-vip require kube_proxy_strict_arp = true, see https://github.com/kube-vip/kube-vip/blob/main/docs/kubernetes/arp/index.md" when: - kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp - kube_vip_arp_enabled -- name: kube-vip | Write static pod +- name: Kube-vip | Write static pod template: src: manifests/kube-vip.manifest.j2 dest: "{{ kube_manifest_dir }}/kube-vip.yml" diff --git a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml index e176cb9767a..5b82ff6206d 100644 --- a/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml +++ b/roles/kubernetes/node/tasks/loadbalancer/nginx-proxy.yml @@ -1,17 +1,17 @@ --- -- name: haproxy | Cleanup potentially deployed haproxy +- name: Haproxy | Cleanup potentially deployed haproxy file: path: "{{ kube_manifest_dir }}/haproxy.yml" state: absent -- name: nginx-proxy | Make nginx directory +- name: Nginx-proxy | Make nginx directory file: path: "{{ nginx_config_dir }}" state: directory mode: 0700 owner: root -- name: nginx-proxy | Write nginx-proxy configuration +- name: Nginx-proxy | Write nginx-proxy configuration template: src: "loadbalancer/nginx.conf.j2" dest: "{{ nginx_config_dir }}/nginx.conf" @@ -19,7 +19,7 @@ mode: 0755 backup: yes -- name: nginx-proxy | Get checksum from config +- name: Nginx-proxy | Get checksum from config stat: path: "{{ nginx_config_dir }}/nginx.conf" get_attributes: no @@ -27,7 +27,7 @@ get_mime: no register: nginx_stat -- name: nginx-proxy | Write static pod +- name: Nginx-proxy | Write static pod template: src: manifests/nginx-proxy.manifest.j2 dest: "{{ kube_manifest_dir }}/nginx-proxy.yml" diff --git a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml 
b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml index 4ea91d83e7e..02fe5264dcf 100644 --- a/roles/kubernetes/preinstall/tasks/0010-swapoff.yml +++ b/roles/kubernetes/preinstall/tasks/0010-swapoff.yml @@ -9,7 +9,7 @@ - none # kubelet fails even if ansible_swaptotal_mb = 0 -- name: check swap +- name: Check swap command: /sbin/swapon -s register: swapon changed_when: no diff --git a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml index ca430cac45b..1a69708f969 100644 --- a/roles/kubernetes/preinstall/tasks/0020-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0020-set_facts.yml @@ -21,7 +21,7 @@ tags: - facts -- name: check if booted with ostree +- name: Check if booted with ostree stat: path: /run/ostree-booted get_attributes: no @@ -29,7 +29,7 @@ get_mime: no register: ostree -- name: set is_fedora_coreos +- name: Set is_fedora_coreos lineinfile: path: /etc/os-release line: "VARIANT_ID=coreos" @@ -38,18 +38,18 @@ register: os_variant_coreos changed_when: false -- name: set is_fedora_coreos +- name: Set is_fedora_coreos set_fact: is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}" -- name: check resolvconf +- name: Check resolvconf command: which resolvconf register: resolvconf failed_when: false changed_when: false check_mode: no -- name: check existence of /etc/resolvconf/resolv.conf.d +- name: Check existence of /etc/resolvconf/resolv.conf.d stat: path: /etc/resolvconf/resolv.conf.d get_attributes: no @@ -58,7 +58,7 @@ failed_when: false register: resolvconfd_path -- name: check status of /etc/resolv.conf +- name: Check status of /etc/resolv.conf stat: path: /etc/resolv.conf follow: no @@ -70,12 +70,12 @@ - block: - - name: get content of /etc/resolv.conf + - name: Get content of /etc/resolv.conf slurp: src: /etc/resolv.conf register: resolvconf_slurp - - name: get currently configured nameservers + - name: Get currently configured nameservers set_fact: 
configured_nameservers: "{{ resolvconf_slurp.content | b64decode | regex_findall('^nameserver\\s*(.*)', multiline=True) | ipaddr }}" when: resolvconf_slurp.content is defined @@ -100,7 +100,7 @@ changed_when: false check_mode: false -- name: check systemd-resolved +- name: Check systemd-resolved # noqa 303 Should we use service_facts for this? command: systemctl is-active systemd-resolved register: systemd_resolved_enabled @@ -108,12 +108,12 @@ changed_when: false check_mode: no -- name: set default dns if remove_default_searchdomains is false +- name: Set default dns if remove_default_searchdomains is false set_fact: default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) -- name: set dns facts +- name: Set dns facts set_fact: resolvconf: >- {%- if resolvconf.rc == 0 and resolvconfd_path.stat.isdir is defined and resolvconfd_path.stat.isdir -%}true{%- else -%}false{%- endif -%} @@ -125,7 +125,7 @@ ['169.254.169.253'] if cloud_provider is defined and cloud_provider == 'aws' else [] }}" -- name: check if kubelet is configured +- name: Check if kubelet is configured stat: path: "{{ kube_config_dir }}/kubelet.env" get_attributes: no @@ -134,11 +134,11 @@ register: kubelet_configured changed_when: false -- name: check if early DNS configuration stage +- name: Check if early DNS configuration stage set_fact: dns_early: "{{ not kubelet_configured.stat.exists }}" -- name: target resolv.conf files +- name: Target resolv.conf files set_fact: resolvconffile: /etc/resolv.conf base: >- @@ -147,12 +147,12 @@ {%- if resolvconf|bool -%}/etc/resolvconf/resolv.conf.d/head{%- endif -%} when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos -- name: target temporary resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS) +- name: Target temporary 
resolvconf cloud init file (Flatcar Container Linux by Kinvolk / Fedora CoreOS) set_fact: resolvconffile: /tmp/resolveconf_cloud_init_conf when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] or is_fedora_coreos -- name: check if /etc/dhclient.conf exists +- name: Check if /etc/dhclient.conf exists stat: path: /etc/dhclient.conf get_attributes: no @@ -160,12 +160,12 @@ get_mime: no register: dhclient_stat -- name: target dhclient conf file for /etc/dhclient.conf +- name: Target dhclient conf file for /etc/dhclient.conf set_fact: dhclientconffile: /etc/dhclient.conf when: dhclient_stat.stat.exists -- name: check if /etc/dhcp/dhclient.conf exists +- name: Check if /etc/dhcp/dhclient.conf exists stat: path: /etc/dhcp/dhclient.conf get_attributes: no @@ -173,22 +173,22 @@ get_mime: no register: dhcp_dhclient_stat -- name: target dhclient conf file for /etc/dhcp/dhclient.conf +- name: Target dhclient conf file for /etc/dhcp/dhclient.conf set_fact: dhclientconffile: /etc/dhcp/dhclient.conf when: dhcp_dhclient_stat.stat.exists -- name: target dhclient hook file for Red Hat family +- name: Target dhclient hook file for Red Hat family set_fact: dhclienthookfile: /etc/dhcp/dhclient.d/zdnsupdate.sh when: ansible_os_family == "RedHat" -- name: target dhclient hook file for Debian family +- name: Target dhclient hook file for Debian family set_fact: dhclienthookfile: /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate when: ansible_os_family == "Debian" -- name: generate search domains to resolvconf +- name: Generate search domains to resolvconf set_fact: searchentries: search {{ (default_searchdomains|default([]) + searchdomains|default([])) | join(' ') }} @@ -199,7 +199,7 @@ supersede_domain: supersede domain-name "{{ dns_domain }}"; -- name: pick coredns cluster IP or default resolver +- name: Pick coredns cluster IP or default resolver set_fact: coredns_server: |- {%- if dns_mode == 'coredns' and not dns_early|bool -%} @@ -215,7 +215,7 @@ {%- endif -%} # This 
task should only run after cluster/nodelocal DNS is up, otherwise all DNS lookups will timeout -- name: generate nameservers for resolvconf, including cluster DNS +- name: Generate nameservers for resolvconf, including cluster DNS set_fact: nameserverentries: |- {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }} @@ -225,7 +225,7 @@ # This task should run instead of the above task when cluster/nodelocal DNS hasn't # been deployed yet (like scale.yml/cluster.yml) or when it's down (reset.yml) -- name: generate nameservers for resolvconf, not including cluster DNS +- name: Generate nameservers for resolvconf, not including cluster DNS set_fact: nameserverentries: |- {{ ( nameservers|d([]) + cloud_resolver|d([]) + configured_nameservers|d([])) | unique | join(',') }} @@ -233,7 +233,7 @@ supersede domain-name-servers {{ ( nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; when: dns_early and not dns_late -- name: gather os specific variables +- name: Gather os specific variables include_vars: "{{ item }}" with_first_found: - files: @@ -247,7 +247,7 @@ - ../vars skip: true -- name: set etcd vars if using kubeadm mode +- name: Set etcd vars if using kubeadm mode set_fact: etcd_cert_dir: "{{ kube_cert_dir }}" kube_etcd_cacert_file: "etcd/ca.crt" @@ -256,7 +256,7 @@ when: - etcd_deployment_type == "kubeadm" -- name: check /usr readonly +- name: Check /usr readonly stat: path: "/usr" get_attributes: no @@ -264,7 +264,7 @@ get_mime: no register: usr -- name: set alternate flexvolume path +- name: Set alternate flexvolume path set_fact: kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volumeplugins when: not usr.stat.writeable diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml 
b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml index 514f3215d9e..ee315de4dc8 100644 --- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml @@ -150,7 +150,7 @@ msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character" when: not ignore_assert_errors -- name: check cloud_provider value +- name: Check cloud_provider value assert: that: cloud_provider in ['gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external'] msg: "If set the 'cloud_provider' var must be set either to 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci' or 'external'" diff --git a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml index 4397cdd63a1..fa809c9d15b 100644 --- a/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml +++ b/roles/kubernetes/preinstall/tasks/0060-resolvconf.yml @@ -1,5 +1,5 @@ --- -- name: create temporary resolveconf cloud init file +- name: Create temporary resolveconf cloud init file command: cp -f /etc/resolv.conf "{{ resolvconffile }}" when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] @@ -43,12 +43,12 @@ - [ 'search\s', 'nameserver\s', 'domain\s', 'options\s' ] notify: Preinstall | propagate resolvconf to k8s components -- name: get temporary resolveconf cloud init file content +- name: Get temporary resolveconf cloud init file content command: cat {{ resolvconffile }} register: cloud_config when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] -- name: persist resolvconf cloud init file +- name: Persist resolvconf cloud init file template: dest: "{{ resolveconf_cloud_init_conf }}" src: resolvconf.j2 diff --git a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml index 7249ac898d1..76513208ae2 100644 --- 
a/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml +++ b/roles/kubernetes/preinstall/tasks/0063-networkmanager-dns.yml @@ -9,7 +9,7 @@ backup: yes notify: Preinstall | update resolvconf for networkmanager -- name: set default dns if remove_default_searchdomains is false +- name: Set default dns if remove_default_searchdomains is false set_fact: default_searchdomains: ["default.svc.{{ dns_domain }}", "svc.{{ dns_domain }}"] when: not remove_default_searchdomains|default()|bool or (remove_default_searchdomains|default()|bool and searchdomains|default([])|length==0) diff --git a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml index 598399b9bc4..af63d742902 100644 --- a/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml +++ b/roles/kubernetes/preinstall/tasks/0120-growpart-azure-centos-7.yml @@ -2,7 +2,7 @@ # Running growpart seems to be only required on Azure, as other Cloud Providers do this at boot time -- name: install growpart +- name: Install growpart package: name: cloud-utils-growpart state: present @@ -20,7 +20,7 @@ partition: "{{ _root_device | first | regex_replace('[^0-9]+([0-9]+)', '\\1') }}" root_device: "{{ _root_device }}" -- name: check if growpart needs to be run +- name: Check if growpart needs to be run command: growpart -N {{ device }} {{ partition }} failed_when: False changed_when: "'NOCHANGE:' not in growpart_needed.stdout" @@ -28,17 +28,17 @@ environment: LC_ALL: C -- name: check fs type +- name: Check fs type command: file -Ls {{ root_device }} changed_when: False register: fs_type -- name: run growpart # noqa 503 +- name: Run growpart # noqa 503 command: growpart {{ device }} {{ partition }} when: growpart_needed.changed environment: LC_ALL: C -- name: run xfs_growfs # noqa 503 +- name: Run xfs_growfs # noqa 503 command: xfs_growfs {{ root_device }} when: growpart_needed.changed and 'XFS' in fs_type.stdout diff --git 
a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml index d42faee8f0a..d5464f1d045 100644 --- a/roles/kubespray-defaults/tasks/fallback_ips.yml +++ b/roles/kubespray-defaults/tasks/fallback_ips.yml @@ -14,7 +14,7 @@ run_once: yes tags: always -- name: create fallback_ips_base +- name: Create fallback_ips_base set_fact: fallback_ips_base: | --- @@ -28,6 +28,6 @@ become: no run_once: yes -- name: set fallback_ips +- name: Set fallback_ips set_fact: fallback_ips: "{{ hostvars.localhost.fallback_ips_base | from_yaml }}" diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml index b4b7af860f8..dc4bb40b49b 100644 --- a/roles/network_plugin/calico/handlers/main.yml +++ b/roles/network_plugin/calico/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: reset_calico_cni +- name: Reset_calico_cni command: /bin/true when: calico_cni_config is defined notify: @@ -7,7 +7,7 @@ - Calico | delete calico-node docker containers - Calico | delete calico-node crio/containerd containers -- name: delete 10-calico.conflist +- name: Delete 10-calico.conflist file: path: /etc/cni/net.d/10-calico.conflist state: absent diff --git a/roles/network_plugin/calico/tasks/reset.yml b/roles/network_plugin/calico/tasks/reset.yml index 48d2e5a00e0..8dab21462d7 100644 --- 
b/roles/network_plugin/calico/tasks/reset.yml @@ -1,5 +1,5 @@ --- -- name: reset | check vxlan.calico network device +- name: Reset | check vxlan.calico network device stat: path: /sys/class/net/vxlan.calico get_attributes: no @@ -7,11 +7,11 @@ get_mime: no register: vxlan -- name: reset | remove the network vxlan.calico device created by calico +- name: Reset | remove the network vxlan.calico device created by calico command: ip link del vxlan.calico when: vxlan.stat.exists -- name: reset | check dummy0 network device +- name: Reset | check dummy0 network device stat: path: /sys/class/net/dummy0 get_attributes: no @@ -19,11 +19,11 @@ get_mime: no register: dummy0 -- name: reset | remove the network device created by calico +- name: Reset | remove the network device created by calico command: ip link del dummy0 when: dummy0.stat.exists -- name: reset | get and remove remaining routes set by bird +- name: Reset | get and remove remaining routes set by bird shell: set -o pipefail && ip route show proto bird | xargs -i bash -c "ip route del {} proto bird " args: executable: /bin/bash diff --git a/roles/network_plugin/canal/handlers/main.yml b/roles/network_plugin/canal/handlers/main.yml index 7769b99b3c7..7c1316589df 100644 --- a/roles/network_plugin/canal/handlers/main.yml +++ b/roles/network_plugin/canal/handlers/main.yml @@ -1,14 +1,14 @@ --- -- 
name: reset_canal_cni +- name: Reset_canal_cni command: /bin/true notify: - - delete 10-canal.conflist + - Delete 10-canal.conflist - - delete canal-node containers + - Delete canal-node containers -- name: delete 10-canal.conflist +- name: Delete 10-canal.conflist file: path: /etc/canal/10-canal.conflist state: absent -- name: delete canal-node containers +- name: Delete canal-node containers shell: "docker ps -af name=k8s_POD_canal-node* -q | xargs --no-run-if-empty docker rm -f" diff --git a/roles/network_plugin/cilium/tasks/reset.yml b/roles/network_plugin/cilium/tasks/reset.yml index 432df8a5c10..b578b074ec3 100644 --- a/roles/network_plugin/cilium/tasks/reset.yml +++ b/roles/network_plugin/cilium/tasks/reset.yml @@ -1,5 +1,5 @@ --- -- name: reset | check and remove devices if still present +- name: Reset | check and remove devices if still present include_tasks: reset_iface.yml vars: iface: "{{ item }}" diff --git a/roles/network_plugin/cilium/tasks/reset_iface.yml b/roles/network_plugin/cilium/tasks/reset_iface.yml index d84a065af67..e2f7c14af51 100644 --- a/roles/network_plugin/cilium/tasks/reset_iface.yml +++ b/roles/network_plugin/cilium/tasks/reset_iface.yml @@ -1,5 +1,5 @@ --- -- name: "reset | check if network device {{ iface }} is present" +- name: "Reset | check if network device {{ iface }} is present" stat: path: "/sys/class/net/{{ iface }}" get_attributes: no @@ -7,6 +7,6 @@ get_mime: no register: device_remains -- name: "reset | remove network device {{ iface }}" +- name: "Reset | remove network device {{ iface }}" command: "ip link del {{ iface }}" when: device_remains.stat.exists diff --git a/roles/network_plugin/flannel/tasks/reset.yml b/roles/network_plugin/flannel/tasks/reset.yml index 2fd86e2bd9b..03d40a0c13a 100644 --- a/roles/network_plugin/flannel/tasks/reset.yml +++ b/roles/network_plugin/flannel/tasks/reset.yml @@ -1,5 +1,5 @@ --- -- name: reset | check cni network device +- name: Reset | check cni network device stat: path: /sys/class/net/cni0 get_attributes: no @@ -7,11 +7,11 @@ get_mime: no 
register: cni -- name: reset | remove the network device created by the flannel +- name: Reset | remove the network device created by the flannel command: ip link del cni0 when: cni.stat.exists -- name: reset | check flannel network device +- name: Reset | check flannel network device stat: path: /sys/class/net/flannel.1 get_attributes: no @@ -19,6 +19,6 @@ get_mime: no register: flannel -- name: reset | remove the network device created by the flannel +- name: Reset | remove the network device created by the flannel command: ip link del flannel.1 when: flannel.stat.exists diff --git a/roles/network_plugin/kube-router/handlers/main.yml b/roles/network_plugin/kube-router/handlers/main.yml index 7bdfc5d4293..2d3b8a4638b 100644 --- a/roles/network_plugin/kube-router/handlers/main.yml +++ b/roles/network_plugin/kube-router/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: reset_kube_router +- name: Reset_kube_router command: /bin/true notify: - Kube-router | delete kube-router docker containers diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml index e91249f7d45..67d57a2d3d7 100644 --- a/roles/network_plugin/kube-router/tasks/annotate.yml +++ b/roles/network_plugin/kube-router/tasks/annotate.yml @@ -1,19 +1,19 @@ --- -- name: kube-router | Add annotations on kube_control_plane +- name: Kube-router | Add annotations on kube_control_plane command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_master }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane'] -- name: kube-router | Add annotations on kube_node +- name: Kube-router | Add annotations on kube_node command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_node }}" delegate_to: "{{ 
groups['kube_control_plane'][0] }}" when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node'] -- name: kube-router | Add common annotations on all servers +- name: Kube-router | Add common annotations on all servers command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_all }}" diff --git a/roles/network_plugin/kube-router/tasks/main.yml b/roles/network_plugin/kube-router/tasks/main.yml index 4cc078ae7af..d62f49bdccd 100644 --- a/roles/network_plugin/kube-router/tasks/main.yml +++ b/roles/network_plugin/kube-router/tasks/main.yml @@ -1,9 +1,9 @@ --- -- name: kube-router | Create annotations +- name: Kube-router | Create annotations include: annotate.yml tags: annotate -- name: kube-router | Create config directory +- name: Kube-router | Create config directory file: path: /var/lib/kube-router state: directory @@ -11,7 +11,7 @@ recurse: true mode: 0755 -- name: kube-router | Create kubeconfig +- name: Kube-router | Create kubeconfig template: src: kubeconfig.yml.j2 dest: /var/lib/kube-router/kubeconfig @@ -20,26 +20,26 @@ notify: - - reset_kube_router + - Reset_kube_router -- name: kube-router | Slurp cni config +- name: Kube-router | Slurp cni config slurp: src: /etc/cni/net.d/10-kuberouter.conflist register: cni_config_slurp ignore_errors: true # noqa ignore-errors -- name: kube-router | Set cni_config variable +- name: Kube-router | Set cni_config variable set_fact: cni_config: "{{ cni_config_slurp.content | b64decode | from_json }}" when: - not cni_config_slurp.failed -- name: kube-router | Set host_subnet variable +- name: Kube-router | Set host_subnet variable set_fact: host_subnet: "{{ cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | first }}" when: - cni_config is defined - cni_config | json_query('plugins[?bridge==`kube-bridge`].ipam.subnet') | length > 0 -- name: kube-router | Create cni config +- name: Kube-router | Create cni config template: src: 
cni-conf.json.j2 dest: /etc/cni/net.d/10-kuberouter.conflist @@ -48,12 +48,12 @@ notify: - reset_kube_router -- name: kube-router | Delete old configuration +- name: Kube-router | Delete old configuration file: path: /etc/cni/net.d/10-kuberouter.conf state: absent -- name: kube-router | Create manifest +- name: Kube-router | Create manifest template: src: kube-router.yml.j2 dest: "{{ kube_config_dir }}/kube-router.yml" diff --git a/roles/network_plugin/kube-router/tasks/reset.yml b/roles/network_plugin/kube-router/tasks/reset.yml index 7b8ad2cebae..ae9ee55c7b7 100644 --- a/roles/network_plugin/kube-router/tasks/reset.yml +++ b/roles/network_plugin/kube-router/tasks/reset.yml @@ -1,5 +1,5 @@ --- -- name: reset | check kube-dummy-if network device +- name: Reset | check kube-dummy-if network device stat: path: /sys/class/net/kube-dummy-if get_attributes: no @@ -7,11 +7,11 @@ get_mime: no register: kube_dummy_if -- name: reset | remove the network device created by kube-router +- name: Reset | remove the network device created by kube-router command: ip link del kube-dummy-if when: kube_dummy_if.stat.exists -- name: check kube-bridge exists +- name: Check kube-bridge exists stat: path: /sys/class/net/kube-bridge get_attributes: no @@ -19,10 +19,10 @@ get_mime: no register: kube_bridge_if -- name: reset | donw the network bridge create by kube-router +- name: Reset | down the network bridge created by kube-router command: ip link set kube-bridge down when: kube_bridge_if.stat.exists -- name: reset | remove the network bridge create by kube-router +- name: Reset | remove the network bridge created by kube-router command: ip link del kube-bridge when: kube_bridge_if.stat.exists diff --git a/roles/network_plugin/ovn4nfv/tasks/main.yml b/roles/network_plugin/ovn4nfv/tasks/main.yml index da212662f00..777fd9a2d34 100644 --- a/roles/network_plugin/ovn4nfv/tasks/main.yml +++ b/roles/network_plugin/ovn4nfv/tasks/main.yml @@ -1,11 +1,11 @@ --- -- name: ovn4nfv | Label 
control-plane node +- name: Ovn4nfv | Label control-plane node command: >- {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane when: - inventory_hostname == groups['kube_control_plane'][0] -- name: ovn4nfv | Create ovn4nfv-k8s manifests +- name: Ovn4nfv | Create ovn4nfv-k8s manifests template: src: "{{ item.file }}.j2" dest: "{{ kube_config_dir }}/{{ item.file }}" diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml index ff8a06d8481..e34811db4b3 100644 --- a/roles/remove-node/post-remove/tasks/main.yml +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: remove-node | Delete node +- name: Remove-node | Delete node command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}" delegate_to: "{{ groups['kube_control_plane']|first }}" when: diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index b45e809f8d5..5ccb08ce94d 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: remove-node | List nodes +- name: Remove-node | List nodes command: >- {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %} register: nodes @@ -9,7 +9,7 @@ changed_when: false run_once: true -- name: remove-node | Drain node except daemonsets resource # noqa 301 +- name: Remove-node | Drain node except daemonsets resource # noqa 301 command: >- {{ kubectl }} drain --force @@ -28,7 +28,7 @@ retries: "{{ drain_retries }}" delay: "{{ drain_retry_delay_seconds }}" -- name: remove-node | Wait until Volumes will be detached from the node +- name: Remove-node | Wait until Volumes will be detached from the node command: >- {{ kubectl }} get volumeattachments -o go-template={% raw %}'{{ range .items }}{{ .spec.nodeName }}{{ "\n" }}{{ end 
}}'{% endraw %} register: nodes_with_volumes diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 2d41ec2a745..c5e99b91c49 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: reset | stop services +- name: Reset | stop services service: name: "{{ item }}" state: stopped @@ -11,7 +11,7 @@ tags: - services -- name: reset | remove services +- name: Reset | remove services file: path: "/etc/systemd/system/{{ item }}" state: absent @@ -30,7 +30,7 @@ - containerd - crio -- name: reset | Remove Docker +- name: Reset | Remove Docker include_role: name: container-engine/docker tasks_from: reset @@ -38,12 +38,12 @@ tags: - docker -- name: reset | systemctl daemon-reload # noqa 503 +- name: Reset | systemctl daemon-reload # noqa 503 systemd: daemon_reload: true when: services_removed.changed -- name: reset | check if crictl is present +- name: Reset | check if crictl is present stat: path: "{{ bin_dir }}/crictl" get_attributes: no @@ -51,7 +51,7 @@ get_mime: no register: crictl -- name: reset | stop all cri containers +- name: Reset | stop all cri containers shell: "set -o pipefail && {{ bin_dir }}/crictl ps -q | xargs -r {{ bin_dir }}/crictl -t 60s stop" args: executable: /bin/bash @@ -68,7 +68,7 @@ - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined ignore_errors: true # noqa ignore-errors -- name: reset | force remove all cri containers +- name: Reset | force remove all cri containers command: "{{ bin_dir }}/crictl rm -a -f" register: remove_all_cri_containers retries: 5 @@ -84,7 +84,7 @@ - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined ignore_errors: true # noqa ignore-errors -- name: reset | stop and disable crio service +- name: Reset | stop and disable crio service service: name: crio state: stopped @@ -93,13 +93,13 @@ tags: [ crio ] when: container_manager == "crio" -- name: 
reset | forcefully wipe CRI-O's container and image storage +- name: Reset | forcefully wipe CRI-O's container and image storage command: "crio wipe -f" failed_when: false tags: [ crio ] when: container_manager == "crio" -- name: reset | stop all cri pods +- name: Reset | stop all cri pods shell: "set -o pipefail && {{ bin_dir }}/crictl pods -q | xargs -r {{ bin_dir }}/crictl -t 60s stopp" args: executable: /bin/bash @@ -115,7 +115,7 @@ ignore_errors: true # noqa ignore-errors - block: - - name: reset | force remove all cri pods + - name: Reset | force remove all cri pods command: "{{ bin_dir }}/crictl rmp -a -f" register: remove_all_cri_containers retries: 5 @@ -128,12 +128,12 @@ - ansible_facts.services['containerd.service'] is defined or ansible_facts.services['cri-o.service'] is defined rescue: - - name: reset | force remove all cri pods (rescue) + - name: Reset | force remove all cri pods (rescue) shell: "ip netns list | cut -d' ' -f 1 | xargs -n1 ip netns delete && {{ bin_dir }}/crictl rmp -a -f" ignore_errors: true # noqa ignore-errors changed_when: true -- name: reset | stop etcd services +- name: Reset | stop etcd services service: name: "{{ item }}" state: stopped @@ -144,7 +144,7 @@ tags: - services -- name: reset | remove etcd services +- name: Reset | remove etcd services file: path: "/etc/systemd/system/{{ item }}.service" state: absent @@ -155,10 +155,10 @@ tags: - services -- name: reset | remove containerd +- name: Reset | remove containerd when: container_manager == 'containerd' block: - - name: reset | stop containerd service + - name: Reset | stop containerd service service: name: containerd state: stopped @@ -166,7 +166,7 @@ tags: - services - - name: reset | remove containerd service + - name: Reset | remove containerd service file: path: /etc/systemd/system/containerd.service state: absent @@ -174,7 +174,7 @@ tags: - services -- name: reset | gather mounted kubelet dirs # noqa 301 +- name: Reset | gather mounted kubelet dirs # noqa 301 shell: 
set -o pipefail && mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac args: executable: /bin/bash @@ -185,7 +185,7 @@ tags: - mounts -- name: reset | unmount kubelet dirs # noqa 301 +- name: Reset | unmount kubelet dirs # noqa 301 command: umount -f {{ item }} with_items: "{{ mounted_dirs.stdout_lines }}" register: umount_dir @@ -196,7 +196,7 @@ tags: - mounts -- name: flush iptables +- name: Flush iptables iptables: table: "{{ item }}" flush: yes @@ -209,7 +209,7 @@ tags: - iptables -- name: flush ip6tables +- name: Flush ip6tables iptables: table: "{{ item }}" flush: yes @@ -229,7 +229,7 @@ when: - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster'] -- name: reset | check kube-ipvs0 network device +- name: Reset | check kube-ipvs0 network device stat: path: /sys/class/net/kube-ipvs0 get_attributes: no @@ -237,13 +237,13 @@ get_mime: no register: kube_ipvs0 -- name: reset | Remove kube-ipvs0 +- name: Reset | Remove kube-ipvs0 command: "ip link del kube-ipvs0" when: - kube_proxy_mode == 'ipvs' - kube_ipvs0.stat.exists -- name: reset | check nodelocaldns network device +- name: Reset | check nodelocaldns network device stat: path: /sys/class/net/nodelocaldns get_attributes: no @@ -251,13 +251,13 @@ get_mime: no register: nodelocaldns_device -- name: reset | Remove nodelocaldns +- name: Reset | Remove nodelocaldns command: "ip link del nodelocaldns" when: - enable_nodelocaldns|default(false)|bool - nodelocaldns_device.stat.exists -- name: reset | Check whether /var/lib/kubelet directory exists +- name: Reset | Check whether /var/lib/kubelet directory exists stat: path: /var/lib/kubelet get_attributes: no @@ -265,7 +265,7 @@ get_mime: no register: var_lib_kubelet_directory -- name: reset | Find files/dirs with immutable flag in /var/lib/kubelet +- name: Reset | Find files/dirs with immutable flag in /var/lib/kubelet command: lsattr -laR /var/lib/kubelet become: true register: var_lib_kubelet_files_dirs_w_attrs @@ -273,7 +273,7 @@ no_log: 
true when: var_lib_kubelet_directory.stat.exists -- name: reset | Remove immutable flag from files/dirs in /var/lib/kubelet +- name: Reset | Remove immutable flag from files/dirs in /var/lib/kubelet file: path: "{{ filedir_path }}" state: touch @@ -286,7 +286,7 @@ filedir_path: "{{ file_dir_line.split(' ')[0] }}" when: var_lib_kubelet_directory.stat.exists -- name: reset | delete some files and directories +- name: Reset | delete some files and directories file: path: "{{ item }}" state: absent @@ -374,7 +374,7 @@ tags: - files -- name: reset | remove containerd binary files +- name: Reset | remove containerd binary files file: path: "{{ containerd_bin_dir }}/{{ item }}" state: absent @@ -394,7 +394,7 @@ tags: - files -- name: reset | remove dns settings from dhclient.conf +- name: Reset | remove dns settings from dhclient.conf blockinfile: path: "{{ item }}" state: absent @@ -407,7 +407,7 @@ - files - dns -- name: reset | remove host entries from /etc/hosts +- name: Reset | remove host entries from /etc/hosts blockinfile: path: "/etc/hosts" state: absent @@ -416,7 +416,7 @@ - files - dns -- name: reset | include file with reset tasks specific to the network_plugin if exists +- name: Reset | include file with reset tasks specific to the network_plugin if exists include_role: name: "network_plugin/{{ kube_network_plugin }}" tasks_from: reset @@ -425,7 +425,7 @@ tags: - network -- name: reset | Restart network +- name: Reset | Restart network service: name: >- {% if ansible_os_family == "RedHat" -%} diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index d1b1af0bee3..2286e9931d0 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: wait for cilium +- name: Wait for cilium when: - needs_cordoning|default(false) - kube_network_plugin == 'cilium' diff --git a/tests/cloud_playbooks/create-do.yml b/tests/cloud_playbooks/create-do.yml index 
3726eb15855..11637c0a3ef 100644 --- a/tests/cloud_playbooks/create-do.yml +++ b/tests/cloud_playbooks/create-do.yml @@ -47,14 +47,14 @@ mode: default tasks: - - name: replace_test_id + - name: Replace_test_id set_fact: test_name: "{{ test_id |regex_replace('\\.', '-') }}" - - name: show vars + - name: Show vars debug: msg="{{ cloud_region }}, {{ cloud_image }}" - - name: set instance names + - name: Set instance names set_fact: instance_names: >- {%- if mode in ['separate', 'ha'] -%} diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index f94b05bcb57..bc7388d9750 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -9,14 +9,14 @@ ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" delete_group_vars: no tasks: - - name: include vars for test {{ ci_job_name }} + - name: Include vars for test {{ ci_job_name }} include_vars: "../files/{{ ci_job_name }}.yml" - - name: replace_test_id + - name: Replace_test_id set_fact: test_name: "{{ test_id |regex_replace('\\.', '-') }}" - - name: set instance names + - name: Set instance names set_fact: instance_names: >- {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%} diff --git a/tests/cloud_playbooks/delete-gce.yml b/tests/cloud_playbooks/delete-gce.yml index b88abea1c7a..d53e91b0022 100644 --- a/tests/cloud_playbooks/delete-gce.yml +++ b/tests/cloud_playbooks/delete-gce.yml @@ -6,11 +6,11 @@ mode: default tasks: - - name: replace_test_id + - name: Replace_test_id set_fact: test_name: "{{ test_id |regex_replace('\\.', '-') }}" - - name: set instance names + - name: Set instance names set_fact: instance_names: >- {%- if mode in ['separate', 'ha'] -%} @@ -19,7 +19,7 @@ k8s-{{ test_name }}-1,k8s-{{ test_name }}-2 {%- endif -%} - - name: stop gce instances + - name: Stop gce instances google.cloud.gcp_compute_instance: instance_names: "{{ instance_names }}" image: "{{ cloud_image | default(omit) }}" @@ -33,7 +33,7 @@ poll: 3 register: 
gce - - name: delete gce instances + - name: Delete gce instances google.cloud.gcp_compute_instance: instance_names: "{{ instance_names }}" image: "{{ cloud_image | default(omit) }}" diff --git a/tests/cloud_playbooks/upload-logs-gcs.yml b/tests/cloud_playbooks/upload-logs-gcs.yml index eeb0edb799f..42600ad4183 100644 --- a/tests/cloud_playbooks/upload-logs-gcs.yml +++ b/tests/cloud_playbooks/upload-logs-gcs.yml @@ -12,7 +12,7 @@ changed_when: false register: out - - name: replace_test_id + - name: Replace_test_id set_fact: test_name: "kargo-ci-{{ out.stdout_lines[0] }}"