From 31a6dcd37f13bbdf85578f69ed70432d3b2b8d74 Mon Sep 17 00:00:00 2001 From: Antti Kervinen Date: Wed, 4 Aug 2021 11:23:06 +0300 Subject: [PATCH 1/2] e2e: add support for distro=sles --- demo/lib/distro.bash | 38 +++++++++++++++++++++++++++++++------- demo/lib/vm.bash | 12 ++++++++++++ test/e2e/run.sh | 5 +++++ 3 files changed, 48 insertions(+), 7 deletions(-) diff --git a/demo/lib/distro.bash b/demo/lib/distro.bash index 3bfb62957..c7460d309 100644 --- a/demo/lib/distro.bash +++ b/demo/lib/distro.bash @@ -25,6 +25,7 @@ distro-refresh-pkg-db() { distro-resolve "$@"; } distro-install-pkg() { distro-resolve "$@"; } distro-remove-pkg() { distro-resolve "$@"; } distro-setup-proxies() { distro-resolve "$@"; } +distro-install-utils() { distro-resolve "$@"; } distro-install-golang() { distro-resolve "$@"; } distro-install-containerd() { distro-resolve "$@"; } distro-config-containerd() { distro-resolve "$@"; } @@ -78,6 +79,7 @@ distro-resolve-fn() { centos*) candidates="$candidates fedora-$apifn rpm-$apifn";; fedora*) candidates="$candidates rpm-$apifn";; *suse*) candidates="$candidates rpm-$apifn";; + sles*) candidates="$candidates opensuse-$apifn rpm-$apifn";; esac case $apifn in *-pre|*-post) ;; @@ -398,11 +400,26 @@ EOF ########################################################################### # -# OpenSUSE 15.2 +# OpenSUSE 15.2 and SLES # ZYPPER="zypper --non-interactive --no-gpg-checks" +sles-image-url() { + echo "/DOWNLOAD-MANUALLY-TO-HOME/vms/images/SLES15-SP3-JeOS.x86_64-15.3-OpenStack-Cloud-GM.qcow2" +} + +sles-ssh-user() { + echo "sles" +} + +sles-install-utils() { + vm-command-q "$ZYPPER lr openSUSE-Oss >/dev/null" || { + distro-install-repo http://download.opensuse.org/distribution/leap/15.3/repo/oss/ openSUSE-Oss + } + distro-install-pkg sysvinit-tools psmisc +} + opensuse-image-url() { echo "https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.2/images/openSUSE-Leap-15.2-OpenStack.x86_64-0.0.4-Build8.25.qcow2" } @@ -438,12 +455,12 @@ 
opensuse-remove-pkg() { } opensuse-install-golang() { - opensuse-install-pkg wget tar gzip git-core + distro-install-pkg wget tar gzip git-core from-tarball-install-golang } opensuse-wait-for-zypper() { - vm-run-until --timeout 5 '( ! pidof zypper >/dev/null ) || ( killall zypper; sleep 1; exit 1 )' || + vm-run-until --timeout 5 '( ! pgrep zypper >/dev/null ) || ( pkill -9 zypper; sleep 1; exit 1 )' || error "Failed to stop zypper running in the background" } @@ -458,7 +475,7 @@ opensuse-install-containerd() { opensuse-install-repo https://download.opensuse.org/repositories/Virtualization:containers/openSUSE_Leap_15.2/Virtualization:containers.repo opensuse-refresh-pkg-db fi - opensuse-install-pkg --from Virtualization_containers containerd containerd-ctr + distro-install-pkg --from Virtualization_containers containerd containerd-ctr vm-command "ln -sf /usr/sbin/containerd-ctr /usr/sbin/ctr" cat < /proc/sys/net/ipv4/ip_forward" vm-command "zypper ls" if ! grep -q snappy <<< "$COMMAND_OUTPUT"; then - opensuse-install-repo "http://download.opensuse.org/repositories/system:/snappy/openSUSE_Leap_15.2 snappy" - opensuse-refresh-pkg-db - opensuse-install-pkg "snapd apparmor-profiles socat ebtables cri-tools conntrackd" + distro-install-repo "http://download.opensuse.org/repositories/system:/snappy/openSUSE_Leap_15.2 snappy" + distro-refresh-pkg-db + distro-install-pkg "snapd apparmor-profiles socat ebtables cri-tools conntrackd iptables ethtool" fi vm-install-containernetworking vm-command "systemctl enable --now snapd" @@ -642,6 +659,13 @@ EOF done } +default-install-utils() { + # $distro-install-utils() is responsible for installing common + # utilities, such as pidof and killall, that the test framework + # and tests in general can expect to be found on VM. 
+ : +} + default-k8s-cni() { echo cilium } diff --git a/demo/lib/vm.bash b/demo/lib/vm.bash index 8f233f24f..5b15f887f 100644 --- a/demo/lib/vm.bash +++ b/demo/lib/vm.bash @@ -632,6 +632,18 @@ vm-install-pkg() { distro-install-pkg "$@" } +vm-setup-oneshot() { + local util + distro-refresh-pkg-db + distro-install-utils + # Verify that all required utilities exit on the VM. + for util in pidof killall; do + vm-command-q "command -v $util >/dev/null" || { + error "required command '$util' missing on VM, fix/implement $distro-install-utils()" + } + done +} + vm-install-golang() { distro-install-golang } diff --git a/test/e2e/run.sh b/test/e2e/run.sh index b8b8cf5c9..984c64c46 100755 --- a/test/e2e/run.sh +++ b/test/e2e/run.sh @@ -1161,6 +1161,11 @@ fi is-hooked "on_vm_online" && run-hook "on_vm_online" +if [ "$reinstall_oneshot" == "1" ] || ! vm-command-q "[ -f .vm-setup-oneshot ]"; then + vm-setup-oneshot + vm-command-q "touch .vm-setup-oneshot" +fi + if [ -n "$vm_files" ]; then install-files "$vm_files" fi From 05af4e5a35730e860ed1f6fc9100ace05b06b3b1 Mon Sep 17 00:00:00 2001 From: Antti Kervinen Date: Thu, 5 Aug 2021 16:48:39 +0300 Subject: [PATCH 2/2] e2e: support reinstalling runc, k8s and all. - If a reinstall variable is set for any package, it will be forced to reinstall on the system. - OpenSUSE / SLES install runc from Virtualization_containers. - Reinstalling k8s means scrapping and recreating the cluster on VM in addition to reinstalling kubeadm, kubectl and kubelet. - Refactor: get rid of screen-install-k8s. 
--- demo/lib/distro.bash | 74 ++++++++++++++++++++++++++++++++++++++------ demo/lib/vm.bash | 11 ++++--- test/e2e/run.sh | 42 ++++++++++++++++--------- 3 files changed, 99 insertions(+), 28 deletions(-) diff --git a/demo/lib/distro.bash b/demo/lib/distro.bash index c7460d309..73319b6d0 100644 --- a/demo/lib/distro.bash +++ b/demo/lib/distro.bash @@ -27,6 +27,7 @@ distro-remove-pkg() { distro-resolve "$@"; } distro-setup-proxies() { distro-resolve "$@"; } distro-install-utils() { distro-resolve "$@"; } distro-install-golang() { distro-resolve "$@"; } +distro-install-runc() { distro-resolve "$@"; } distro-install-containerd() { distro-resolve "$@"; } distro-config-containerd() { distro-resolve "$@"; } distro-restart-containerd() { distro-resolve "$@"; } @@ -194,7 +195,18 @@ debian-install-pkg() { # /etc/containerd/config.toml and then apt-get installs # containerd. 'yes ""' will continue with the default answer (N: # keep existing) in this case. Without 'yes' installation fails. - vm-command "yes \"\" | apt-get install -y $*" || + + # Add apt-get option "--reinstall" if any environment variable + # reinstall_=1 + local pkg + local opts="" + for pkg in "$@"; do + if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then + opts="$opts --reinstall" + break + fi + done + vm-command "yes \"\" | apt-get install $opts -y $*" || command-error "failed to install $*" } @@ -319,8 +331,27 @@ fedora-install-repo() { } fedora-install-pkg() { + local pkg + local do_reinstall=0 + for pkg in "$@"; do + if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then + do_reinstall=1 + break + fi + done vm-command "dnf install -y $*" || command-error "failed to install $*" + # When requesting reinstallation, detect which packages were + # already installed and reinstall those. + # (Unlike apt and zypper, dnf offers no option for reinstalling + # existing and installing new packages on the same run.) 
+ if [ "$do_reinstall" == "1" ]; then + local reinstall_pkgs + reinstall_pkgs=$(awk -F '[ -]' -v ORS=" " '/Package .* already installed/{print $2}' <<< "$COMMAND_OUTPUT") + if [ -n "$reinstall_pkgs" ]; then + vm-command "dnf reinstall -y $reinstall_pkgs" + fi + fi } fedora-remove-pkg() { @@ -439,13 +470,23 @@ opensuse-install-repo() { } opensuse-refresh-pkg-db() { + opensuse-wait-for-zypper vm-command "$ZYPPER refresh" || - command-err "failed to refresh zypper package DB" + command-error "failed to refresh zypper package DB" } opensuse-install-pkg() { opensuse-wait-for-zypper - vm-command "$ZYPPER install $*" || + # Add zypper option "--force" if environment variable reinstall_=1 + local pkg + local opts="" + for pkg in "$@"; do + if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then + opts="$opts --force" + break + fi + done + vm-command "$ZYPPER install $opts $*" || command-error "failed to install $*" } @@ -464,17 +505,27 @@ opensuse-wait-for-zypper() { error "Failed to stop zypper running in the background" } -opensuse-install-crio-pre() { - distro-install-pkg runc conmon - vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true -} - -opensuse-install-containerd() { +opensuse-require-repo-virtualization-containers() { vm-command "zypper ls" if ! 
grep -q Virtualization_containers <<< "$COMMAND_OUTPUT"; then opensuse-install-repo https://download.opensuse.org/repositories/Virtualization:containers/openSUSE_Leap_15.2/Virtualization:containers.repo opensuse-refresh-pkg-db fi +} + +opensuse-install-crio-pre() { + opensuse-require-repo-virtualization-containers + distro-install-pkg --from Virtualization_containers runc conmon + vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true +} + +opensuse-install-runc() { + opensuse-require-repo-virtualization-containers + distro-install-pkg --from Virtualization_containers runc +} + +opensuse-install-containerd() { + opensuse-require-repo-virtualization-containers distro-install-pkg --from Virtualization_containers containerd containerd-ctr vm-command "ln -sf /usr/sbin/containerd-ctr /usr/sbin/ctr" @@ -670,9 +721,12 @@ default-k8s-cni() { echo cilium } +default-install-runc() { + distro-install-pkg runc +} + default-install-containerd() { vm-command-q "[ -f /usr/bin/containerd ]" || { - distro-refresh-pkg-db distro-install-pkg containerd } } diff --git a/demo/lib/vm.bash b/demo/lib/vm.bash index 5b15f887f..e360fb58c 100644 --- a/demo/lib/vm.bash +++ b/demo/lib/vm.bash @@ -145,7 +145,6 @@ vm-check-running-binary() { return 0 } - vm-check-source-files-changed() { local bin_change local src_change @@ -634,7 +633,7 @@ vm-install-pkg() { vm-setup-oneshot() { local util - distro-refresh-pkg-db + ( distro-refresh-pkg-db ) || true distro-install-utils # Verify that all required utilities exit on the VM. 
for util in pidof killall; do @@ -660,7 +659,7 @@ vm-install-runc() { fi vm-put-file "$host_runc" "$vm_runc" else - distro-install-pkg runc + distro-install-runc fi } @@ -780,8 +779,12 @@ vm-create-cluster() { vm-command "cp /etc/kubernetes/admin.conf ~root/.kube/config" } +vm-destroy-cluster() { + vm-command "yes | kubeadm reset; rm -f \$HOME/.kube/config ~root/.kube/config /etc/kubernetes" +} + vm-install-cni-cilium() { - vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml" + vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml" if ! vm-command "kubectl rollout status --timeout=360s -n kube-system daemonsets/cilium"; then command-error "installing cilium CNI to Kubernetes timed out" fi diff --git a/test/e2e/run.sh b/test/e2e/run.sh index 984c64c46..871d13664 100755 --- a/test/e2e/run.sh +++ b/test/e2e/run.sh @@ -64,6 +64,8 @@ usage() { echo " then reinstall and restart it before starting test run." echo " The default is 0." echo " Set containerd_src/crio_src/runc_src to install a local build." + echo " reinstall_k8s: if 1, destroy existing k8s cluster and create a new one." + echo " reinstall_all: if 1, set all above reinstall_* options to 1." echo " omit_cri_resmgr: if 1, omit checking/installing/starting cri-resmgr." echo " omit_agent: if 1, omit checking/installing/starting cri-resmgr-agent." echo " outdir: Save output under given directory." @@ -155,13 +157,6 @@ screen-create-vm() { fi } -screen-install-k8s() { - speed=60 out "### Installing CRI Runtime to the VM." - vm-install-cri - speed=60 out "### Installing Kubernetes to the VM." - vm-install-k8s -} - screen-install-cri-resmgr() { speed=60 out "### Installing CRI Resource Manager to VM." 
vm-install-cri-resmgr @@ -974,7 +969,7 @@ test-user-code() { } # Validate parameters -input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_containerd reinstall_crio reinstall_cri_resmgr outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts" +input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_all reinstall_containerd reinstall_crio reinstall_cri_resmgr reinstall_k8s reinstall_oneshot outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts" INTERACTIVE_MODE=0 mode=$1 @@ -1028,11 +1023,22 @@ cri_resmgr_cfg=${cri_resmgr_cfg:-"${SCRIPT_DIR}/cri-resmgr-topology-aware.cfg"} cri_resmgr_extra_args=${cri_resmgr_extra_args:-""} cri_resmgr_agent_extra_args=${cri_resmgr_agent_extra_args:-""} cleanup=${cleanup:-0} +reinstall_all=${reinstall_all:-0} reinstall_containerd=${reinstall_containerd:-0} -reinstall_crio=${reinstall_crio:-0} reinstall_cri_resmgr=${reinstall_cri_resmgr:-0} reinstall_cri_resmgr_agent=${reinstall_cri_resmgr_agent:-0} +reinstall_crio=${reinstall_crio:-0} +reinstall_k8s=${reinstall_k8s:-0} +reinstall_kubeadm=${reinstall_kubeadm:-0} +reinstall_kubectl=${reinstall_kubectl:-0} +reinstall_kubelet=${reinstall_kubelet:-0} +reinstall_oneshot=${reinstall_oneshot:-0} reinstall_runc=${reinstall_runc:-0} +if [ "$reinstall_all" == "1" ]; then + for reinstall_var in ${!reinstall_*}; do + eval "${reinstall_var}=1" + done +fi omit_agent=${omit_agent:-0} omit_cri_resmgr=${omit_cri_resmgr:-0} py_consts="${py_consts:-''}" @@ -1170,16 +1176,20 @@ if [ -n "$vm_files" ]; then install-files "$vm_files" fi -if ! vm-command-q "type -p kubelet >/dev/null"; then - screen-install-k8s +if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ] || ! 
vm-command-q "( type -p containerd || type -p crio ) >/dev/null"; then +    vm-install-cri fi -if [ "$reinstall_runc" == "1" ]; then +# runc is installed as a dependency of containerd and crio. +# If reinstalling runc is explicitly wished for, it is safe to do +# only after (re)installing containerd/crio. Otherwise +# a custom locally built runc may be overridden from packages. +if [ "$reinstall_runc" == "1" ] || ! vm-command-q "type -p runc >/dev/null"; then vm-install-runc fi -if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ]; then -    vm-install-cri +if [ "$reinstall_k8s" == "1" ] || ! vm-command-q "type -p kubelet >/dev/null"; then +    vm-install-k8s fi if [ "$reinstall_cri_resmgr" == "1" ]; then @@ -1252,6 +1262,10 @@ if [ "$omit_cri_resmgr" != "1" ]; then fi # Create kubernetes cluster or wait that it is online +if [ "$reinstall_k8s" == "1" ]; then +    vm-destroy-cluster +fi + if vm-command-q "[ ! -f /var/lib/kubelet/config.yaml ]"; then screen-create-singlenode-cluster else