From ff2013444e0e03604a4cdf7b8c9ba11943b265e7 Mon Sep 17 00:00:00 2001
From: Antti Kervinen
Date: Thu, 5 Aug 2021 16:48:39 +0300
Subject: [PATCH] e2e: support reinstalling runc, k8s and all.

- If the reinstall variable of a package is set, the package is
  forcibly reinstalled on the system.
- OpenSUSE / SLES install runc from the Virtualization_containers
  repository.
- Reinstalling k8s means scrapping and recreating the cluster on the
  VM in addition to reinstalling kubeadm, kubectl and kubelet.
- Refactor: get rid of screen-install-k8s.
---
 demo/lib/distro.bash | 74 ++++++++++++++++++++++++++++++++++++++------
 demo/lib/vm.bash     |  9 ++++--
 test/e2e/run.sh      | 42 ++++++++++++++++---------
 3 files changed, 98 insertions(+), 27 deletions(-)

diff --git a/demo/lib/distro.bash b/demo/lib/distro.bash
index c7460d309a..73319b6d06 100644
--- a/demo/lib/distro.bash
+++ b/demo/lib/distro.bash
@@ -27,6 +27,7 @@ distro-remove-pkg() { distro-resolve "$@"; }
 distro-setup-proxies() { distro-resolve "$@"; }
 distro-install-utils() { distro-resolve "$@"; }
 distro-install-golang() { distro-resolve "$@"; }
+distro-install-runc() { distro-resolve "$@"; }
 distro-install-containerd() { distro-resolve "$@"; }
 distro-config-containerd() { distro-resolve "$@"; }
 distro-restart-containerd() { distro-resolve "$@"; }
@@ -194,7 +195,18 @@ debian-install-pkg() {
     # /etc/containerd/config.toml and then apt-get installs
     # containerd. 'yes ""' will continue with the default answer (N:
     # keep existing) in this case. Without 'yes' installation fails.
-    vm-command "yes \"\" | apt-get install -y $*" ||
+
+    # Add the apt-get option "--reinstall" if the environment variable
+    # reinstall_<pkg>=1 is set for any package being installed.
+    local pkg
+    local opts=""
+    for pkg in "$@"; do
+        if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then
+            opts="$opts --reinstall"
+            break
+        fi
+    done
+    vm-command "yes \"\" | apt-get install $opts -y $*" ||
         command-error "failed to install $*"
 }

@@ -319,8 +331,27 @@ fedora-install-repo() {
 }

 fedora-install-pkg() {
+    local pkg
+    local do_reinstall=0
+    for pkg in "$@"; do
+        if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then
+            do_reinstall=1
+            break
+        fi
+    done
     vm-command "dnf install -y $*" ||
         command-error "failed to install $*"
+    # When reinstallation is requested, detect which packages were
+    # already installed and reinstall those.
+    # (Unlike apt and zypper, dnf offers no option for reinstalling
+    # existing and installing new packages on the same run.)
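+    # vm-command leaves the output of the remote command in
+    # COMMAND_OUTPUT; dnf reports each skipped package on a line like
+    # "Package <name>-<version>.<arch> is already installed", and the
+    # awk expression below picks the plain package names from those
+    # lines.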
+    if [ "$do_reinstall" == "1" ]; then
+        local reinstall_pkgs
+        reinstall_pkgs=$(awk -F '[ -]' -v ORS=" " '/Package .* already installed/{print $2}' <<< "$COMMAND_OUTPUT")
+        if [ -n "$reinstall_pkgs" ]; then
+            vm-command "dnf reinstall -y $reinstall_pkgs"
+        fi
+    fi
 }

 fedora-remove-pkg() {
@@ -439,13 +470,23 @@ opensuse-install-repo() {
 }

 opensuse-refresh-pkg-db() {
+    opensuse-wait-for-zypper
     vm-command "$ZYPPER refresh" ||
-        command-err "failed to refresh zypper package DB"
+        command-error "failed to refresh zypper package DB"
 }

 opensuse-install-pkg() {
     opensuse-wait-for-zypper
-    vm-command "$ZYPPER install $*" ||
+    # Add the zypper option "--force" if the environment variable
+    # reinstall_<pkg>=1 is set for any package being installed.
+    local pkg
+    local opts=""
+    for pkg in "$@"; do
+        if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then
+            opts="$opts --force"
+            break
+        fi
+    done
+    vm-command "$ZYPPER install $opts $*" ||
         command-error "failed to install $*"
 }

@@ -464,17 +505,27 @@ opensuse-wait-for-zypper() {
     error "Failed to stop zypper running in the background"
 }

-opensuse-install-crio-pre() {
-    distro-install-pkg runc conmon
-    vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true
-}
-
-opensuse-install-containerd() {
+opensuse-require-repo-virtualization-containers() {
     vm-command "zypper ls"
     if ! grep -q Virtualization_containers <<< "$COMMAND_OUTPUT"; then
         opensuse-install-repo https://download.opensuse.org/repositories/Virtualization:containers/openSUSE_Leap_15.2/Virtualization:containers.repo
         opensuse-refresh-pkg-db
     fi
+}
+
+opensuse-install-crio-pre() {
+    opensuse-require-repo-virtualization-containers
+    distro-install-pkg --from Virtualization_containers runc conmon
+    vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true
+}
+
+opensuse-install-runc() {
+    opensuse-require-repo-virtualization-containers
+    distro-install-pkg --from Virtualization_containers runc
+}
+
+opensuse-install-containerd() {
+    opensuse-require-repo-virtualization-containers
     distro-install-pkg --from Virtualization_containers containerd containerd-ctr
     vm-command "ln -sf /usr/sbin/containerd-ctr /usr/sbin/ctr"
@@ -670,9 +721,12 @@ default-k8s-cni() {
     echo cilium
 }

+default-install-runc() {
+    distro-install-pkg runc
+}
+
 default-install-containerd() {
     vm-command-q "[ -f /usr/bin/containerd ]" || {
-        distro-refresh-pkg-db
         distro-install-pkg containerd
     }
 }
diff --git a/demo/lib/vm.bash b/demo/lib/vm.bash
index 5b15f887ff..43b95402f8 100644
--- a/demo/lib/vm.bash
+++ b/demo/lib/vm.bash
@@ -145,7 +145,6 @@ vm-check-running-binary() {
     return 0
 }

-
 vm-check-source-files-changed() {
     local bin_change
     local src_change
@@ -660,7 +659,7 @@ vm-install-runc() {
         fi
         vm-put-file "$host_runc" "$vm_runc"
     else
-        distro-install-pkg runc
+        distro-install-runc
     fi
 }

@@ -780,8 +779,12 @@ vm-create-cluster() {
     vm-command "cp /etc/kubernetes/admin.conf ~root/.kube/config"
 }

+vm-destroy-cluster() {
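+    # Reset kubeadm state and remove stale kubeconfigs so that the
+    # next vm-create-cluster starts from a clean slate.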
+    vm-command "yes | kubeadm reset; rm -rf \$HOME/.kube/config ~root/.kube/config /etc/kubernetes"
+}
+
 vm-install-cni-cilium() {
-    vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml"
+    vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml"
     if ! vm-command "kubectl rollout status --timeout=360s -n kube-system daemonsets/cilium"; then
         command-error "installing cilium CNI to Kubernetes timed out"
     fi
diff --git a/test/e2e/run.sh b/test/e2e/run.sh
index 984c64c468..871d136648 100755
--- a/test/e2e/run.sh
+++ b/test/e2e/run.sh
@@ -64,6 +64,8 @@ usage() {
     echo "    then reinstall and restart it before starting test run."
     echo "    The default is 0."
     echo "    Set containerd_src/crio_src/runc_src to install a local build."
+    echo "    reinstall_k8s: if 1, destroy existing k8s cluster and create a new one."
+    echo "    reinstall_all: if 1, set all above reinstall_* options to 1."
     echo "    omit_cri_resmgr: if 1, omit checking/installing/starting cri-resmgr."
     echo "    omit_agent: if 1, omit checking/installing/starting cri-resmgr-agent."
     echo "    outdir: Save output under given directory."
@@ -155,13 +157,6 @@ screen-create-vm() {
     fi
 }

-screen-install-k8s() {
-    speed=60 out "### Installing CRI Runtime to the VM."
-    vm-install-cri
-    speed=60 out "### Installing Kubernetes to the VM."
-    vm-install-k8s
-}
-
 screen-install-cri-resmgr() {
     speed=60 out "### Installing CRI Resource Manager to VM."
     vm-install-cri-resmgr
@@ -974,7 +969,7 @@ test-user-code() {
 }

 # Validate parameters
-input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_containerd reinstall_crio reinstall_cri_resmgr outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts"
+input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_all reinstall_containerd reinstall_crio reinstall_cri_resmgr reinstall_k8s reinstall_oneshot outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts"

 INTERACTIVE_MODE=0
 mode=$1
@@ -1028,11 +1023,22 @@ cri_resmgr_cfg=${cri_resmgr_cfg:-"${SCRIPT_DIR}/cri-resmgr-topology-aware.cfg"}
 cri_resmgr_extra_args=${cri_resmgr_extra_args:-""}
 cri_resmgr_agent_extra_args=${cri_resmgr_agent_extra_args:-""}
 cleanup=${cleanup:-0}
+reinstall_all=${reinstall_all:-0}
 reinstall_containerd=${reinstall_containerd:-0}
-reinstall_crio=${reinstall_crio:-0}
 reinstall_cri_resmgr=${reinstall_cri_resmgr:-0}
 reinstall_cri_resmgr_agent=${reinstall_cri_resmgr_agent:-0}
+reinstall_crio=${reinstall_crio:-0}
+reinstall_k8s=${reinstall_k8s:-0}
+reinstall_kubeadm=${reinstall_kubeadm:-0}
+reinstall_kubectl=${reinstall_kubectl:-0}
+reinstall_kubelet=${reinstall_kubelet:-0}
+reinstall_oneshot=${reinstall_oneshot:-0}
 reinstall_runc=${reinstall_runc:-0}
+if [ "$reinstall_all" == "1" ]; then
+    for reinstall_var in ${!reinstall_*}; do
+        eval "${reinstall_var}=1"
+    done
+fi
 omit_agent=${omit_agent:-0}
 omit_cri_resmgr=${omit_cri_resmgr:-0}
 py_consts="${py_consts:-''}"
@@ -1170,16 +1176,20 @@ if [ -n "$vm_files" ]; then
     install-files "$vm_files"
 fi

-if ! vm-command-q "type -p kubelet >/dev/null"; then
-    screen-install-k8s
+if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ] || ! vm-command-q "( type -p containerd || type -p crio ) >/dev/null"; then
+    vm-install-cri
 fi

-if [ "$reinstall_runc" == "1" ]; then
+# runc is installed as a dependency of containerd and crio.
+# If reinstalling runc is explicitly requested, it is safe to do so
+# only after (re)installing containerd/crio. Otherwise a custom,
+# locally built runc could be overwritten by the packaged one.
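+# (When runc_src points to a locally built runc, vm-install-runc
+# uploads that binary to the VM instead of calling distro-install-runc;
+# see vm-install-runc in demo/lib/vm.bash.)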
+if [ "$reinstall_runc" == "1" ] || ! vm-command-q "type -p runc >/dev/null"; then
     vm-install-runc
 fi

-if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ]; then
-    vm-install-cri
+if [ "$reinstall_k8s" == "1" ] || ! vm-command-q "type -p kubelet >/dev/null"; then
+    vm-install-k8s
 fi

 if [ "$reinstall_cri_resmgr" == "1" ]; then
@@ -1252,6 +1262,10 @@ if [ "$omit_cri_resmgr" != "1" ]; then
 fi

 # Create kubernetes cluster or wait that it is online
+if [ "$reinstall_k8s" == "1" ]; then
+    vm-destroy-cluster
+fi
+
 if vm-command-q "[ ! -f /var/lib/kubelet/config.yaml ]"; then
     screen-create-singlenode-cluster
 else
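
The reinstall knobs are plain environment variables of test/e2e/run.sh.
A sketch of typical invocations (the vm name below is a placeholder,
and "test" stands for whichever run mode you normally use):

    # Recreate the k8s cluster and reinstall runc on an existing VM:
    reinstall_k8s=1 reinstall_runc=1 vm=my-e2e-vm test/e2e/run.sh test

    # Force reinstalling everything on the VM:
    reinstall_all=1 vm=my-e2e-vm test/e2e/run.sh test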