Skip to content

Commit

Permalink
e2e: support reinstalling runc, k8s and all.
Browse files Browse the repository at this point in the history
- If a reinstall variable is set for any package, it will be
  forced to reinstall on the system.
- OpenSUSE / SLES install runc from Virtualization_containers.
- Reinstalling k8s means scrapping and recreating the cluster on VM
  in addition to reinstalling kubeadm, kubectl and kubelet.
- Refactor: get rid of screen-install-k8s.
  • Loading branch information
askervin committed Aug 6, 2021
1 parent 31a6dcd commit ff20134
Show file tree
Hide file tree
Showing 3 changed files with 98 additions and 27 deletions.
74 changes: 64 additions & 10 deletions demo/lib/distro.bash
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ distro-remove-pkg() { distro-resolve "$@"; }
distro-setup-proxies() { distro-resolve "$@"; }
distro-install-utils() { distro-resolve "$@"; }
distro-install-golang() { distro-resolve "$@"; }
distro-install-runc() { distro-resolve "$@"; }
distro-install-containerd() { distro-resolve "$@"; }
distro-config-containerd() { distro-resolve "$@"; }
distro-restart-containerd() { distro-resolve "$@"; }
Expand Down Expand Up @@ -194,7 +195,18 @@ debian-install-pkg() {
# /etc/containerd/config.toml and then apt-get installs
# containerd. 'yes ""' will continue with the default answer (N:
# keep existing) in this case. Without 'yes' installation fails.
vm-command "yes \"\" | apt-get install -y $*" ||

# Add apt-get option "--reinstall" if any environment variable
# reinstall_<pkg>=1
local pkg
local opts=""
for pkg in "$@"; do
if [ "$(eval echo \$reinstall_$pkg)" == "1" ]; then
opts="$opts --reinstall"
break
fi
done
vm-command "yes \"\" | apt-get install $opts -y $*" ||
command-error "failed to install $*"
}

Expand Down Expand Up @@ -319,8 +331,27 @@ fedora-install-repo() {
}

fedora-install-pkg() {
    # Install packages with dnf on Fedora-family VMs.
    # If environment variable reinstall_<pkg>=1 is set for any
    # requested package, packages that were already installed are
    # reinstalled in a separate "dnf reinstall" run.
    # (Unlike apt and zypper, dnf offers no option for reinstalling
    # existing and installing new packages on the same run.)
    local pkg var do_reinstall=0
    for pkg in "$@"; do
        # Indirect expansion (${!var}) instead of eval: package-name
        # content is never evaluated as shell code. Dashes in package
        # names map to underscores to form a valid variable name.
        var="reinstall_${pkg//-/_}"
        if [ "${!var:-0}" == "1" ]; then
            do_reinstall=1
            break
        fi
    done
    vm-command "dnf install -y $*" ||
        command-error "failed to install $*"
    # When requesting reinstallation, detect from dnf's output which
    # packages were already installed and reinstall those.
    if [ "$do_reinstall" == "1" ]; then
        local reinstall_pkgs
        reinstall_pkgs=$(awk -F '[ -]' -v ORS=" " '/Package .* already installed/{print $2}' <<< "$COMMAND_OUTPUT")
        if [ -n "$reinstall_pkgs" ]; then
            vm-command "dnf reinstall -y $reinstall_pkgs"
        fi
    fi
}

fedora-remove-pkg() {
Expand Down Expand Up @@ -439,13 +470,23 @@ opensuse-install-repo() {
}

opensuse-refresh-pkg-db() {
    # Refresh zypper's package database, waiting first for any
    # background zypper instance to release its lock.
    opensuse-wait-for-zypper
    vm-command "$ZYPPER refresh" ||
        command-error "failed to refresh zypper package DB"
}

opensuse-install-pkg() {
    # Install packages with zypper on openSUSE/SLES VMs.
    # Add zypper option "--force" (reinstall) when any requested
    # package has environment variable reinstall_<pkg>=1.
    opensuse-wait-for-zypper
    local pkg var
    local opts=""
    for pkg in "$@"; do
        # Indirect expansion instead of eval; dashes in package names
        # map to underscores to form a valid variable name.
        var="reinstall_${pkg//-/_}"
        if [ "${!var:-0}" == "1" ]; then
            opts="$opts --force"
            break
        fi
    done
    vm-command "$ZYPPER install $opts $*" ||
        command-error "failed to install $*"
}

Expand All @@ -464,17 +505,27 @@ opensuse-wait-for-zypper() {
error "Failed to stop zypper running in the background"
}

# NOTE(review): superseded definition — a later opensuse-install-crio-pre
# in this file redefines this name (bash keeps the last definition seen).
opensuse-install-crio-pre() {
    # Install CRI-O prerequisites from the default repositories.
    distro-install-pkg runc conmon
    # Best-effort symlink for the libdevmapper soname the packages
    # link against; "|| true" keeps a failure from aborting the run.
    vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true
}

opensuse-install-containerd() {
opensuse-require-repo-virtualization-containers() {
    # Ensure the Virtualization_containers zypper repository is
    # configured on the VM; add and refresh it only when missing.
    vm-command "zypper ls"
    case "$COMMAND_OUTPUT" in
        *Virtualization_containers*)
            # Repository already listed; nothing to do.
            ;;
        *)
            opensuse-install-repo https://download.opensuse.org/repositories/Virtualization:containers/openSUSE_Leap_15.2/Virtualization:containers.repo
            opensuse-refresh-pkg-db
            ;;
    esac
}

opensuse-install-crio-pre() {
    # Prepare for CRI-O installation: runc and conmon come from the
    # Virtualization_containers repository, added on demand.
    opensuse-require-repo-virtualization-containers
    distro-install-pkg --from Virtualization_containers runc conmon
    # Best-effort symlink for the libdevmapper soname the packages
    # link against; "|| true" keeps a failure from aborting the run.
    vm-command "ln -sf /usr/lib64/libdevmapper.so.1.02 /usr/lib64/libdevmapper.so.1.02.1" || true
}

opensuse-install-runc() {
    # Install runc from the Virtualization_containers repository
    # (added on demand) instead of the default repositories.
    opensuse-require-repo-virtualization-containers
    distro-install-pkg --from Virtualization_containers runc
}

opensuse-install-containerd() {
opensuse-require-repo-virtualization-containers
distro-install-pkg --from Virtualization_containers containerd containerd-ctr
vm-command "ln -sf /usr/sbin/containerd-ctr /usr/sbin/ctr"

Expand Down Expand Up @@ -670,9 +721,12 @@ default-k8s-cni() {
echo cilium
}

default-install-runc() {
    # Default implementation of distro-install-runc: install runc from
    # the distro's packages. Distro-specific overrides (e.g.
    # opensuse-install-runc) take precedence via distro-resolve.
    distro-install-pkg runc
}

default-install-containerd() {
    # Install containerd from distro packages, but only when the
    # binary is not already present on the VM.
    if ! vm-command-q "[ -f /usr/bin/containerd ]"; then
        distro-refresh-pkg-db
        distro-install-pkg containerd
    fi
}
Expand Down
9 changes: 6 additions & 3 deletions demo/lib/vm.bash
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,6 @@ vm-check-running-binary() {
return 0
}


vm-check-source-files-changed() {
local bin_change
local src_change
Expand Down Expand Up @@ -660,7 +659,7 @@ vm-install-runc() {
fi
vm-put-file "$host_runc" "$vm_runc"
else
distro-install-pkg runc
distro-install-runc
fi
}

Expand Down Expand Up @@ -780,8 +779,12 @@ vm-create-cluster() {
vm-command "cp /etc/kubernetes/admin.conf ~root/.kube/config"
}

vm-destroy-cluster() {
    # Tear down the existing Kubernetes cluster on the VM: reset
    # kubeadm ("yes |" answers its confirmation prompt) and remove
    # stale kubeconfigs and state.
    # Use -rf: /etc/kubernetes is a directory, plain -f cannot remove it.
    vm-command "yes | kubeadm reset; rm -rf \$HOME/.kube/config ~root/.kube/config /etc/kubernetes"
}

vm-install-cni-cilium() {
vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml"
vm-command "kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml"
if ! vm-command "kubectl rollout status --timeout=360s -n kube-system daemonsets/cilium"; then
command-error "installing cilium CNI to Kubernetes timed out"
fi
Expand Down
42 changes: 28 additions & 14 deletions test/e2e/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@ usage() {
echo " then reinstall and restart it before starting test run."
echo " The default is 0."
echo " Set containerd_src/crio_src/runc_src to install a local build."
echo " reinstall_k8s: if 1, destroy existing k8s cluster and create a new one."
echo " reinstall_all: if 1, set all above reinstall_* options to 1."
echo " omit_cri_resmgr: if 1, omit checking/installing/starting cri-resmgr."
echo " omit_agent: if 1, omit checking/installing/starting cri-resmgr-agent."
echo " outdir: Save output under given directory."
Expand Down Expand Up @@ -155,13 +157,6 @@ screen-create-vm() {
fi
}

screen-install-k8s() {
    # Install the CRI runtime and Kubernetes to the VM, announcing
    # each step on screen.
    # NOTE(review): "speed=60 out ..." passes speed in the environment
    # of the out helper; out/speed semantics are defined elsewhere.
    speed=60 out "### Installing CRI Runtime to the VM."
    vm-install-cri
    speed=60 out "### Installing Kubernetes to the VM."
    vm-install-k8s
}

screen-install-cri-resmgr() {
speed=60 out "### Installing CRI Resource Manager to VM."
vm-install-cri-resmgr
Expand Down Expand Up @@ -974,7 +969,7 @@ test-user-code() {
}

# Validate parameters
input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_containerd reinstall_crio reinstall_cri_resmgr outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts"
input_var_names="mode user_script_file distro k8scri vm cgroups speed binsrc reinstall_all reinstall_containerd reinstall_crio reinstall_cri_resmgr reinstall_k8s reinstall_oneshot outdir cleanup on_verify_fail on_create_fail on_verify on_create on_launch topology cri_resmgr_cfg cri_resmgr_extra_args cri_resmgr_agent_extra_args code py_consts"

INTERACTIVE_MODE=0
mode=$1
Expand Down Expand Up @@ -1028,11 +1023,22 @@ cri_resmgr_cfg=${cri_resmgr_cfg:-"${SCRIPT_DIR}/cri-resmgr-topology-aware.cfg"}
cri_resmgr_extra_args=${cri_resmgr_extra_args:-""}
cri_resmgr_agent_extra_args=${cri_resmgr_agent_extra_args:-""}
cleanup=${cleanup:-0}
reinstall_all=${reinstall_all:-0}
reinstall_containerd=${reinstall_containerd:-0}
reinstall_crio=${reinstall_crio:-0}
reinstall_cri_resmgr=${reinstall_cri_resmgr:-0}
reinstall_cri_resmgr_agent=${reinstall_cri_resmgr_agent:-0}
reinstall_crio=${reinstall_crio:-0}
reinstall_k8s=${reinstall_k8s:-0}
reinstall_kubeadm=${reinstall_kubeadm:-0}
reinstall_kubectl=${reinstall_kubectl:-0}
reinstall_kubelet=${reinstall_kubelet:-0}
reinstall_oneshot=${reinstall_oneshot:-0}
reinstall_runc=${reinstall_runc:-0}
if [ "$reinstall_all" == "1" ]; then
    # reinstall_all=1 forces every reinstall_* knob to 1.
    # printf -v assigns through a variable name without eval.
    for reinstall_var in ${!reinstall_*}; do
        printf -v "$reinstall_var" 1
    done
fi
omit_agent=${omit_agent:-0}
omit_cri_resmgr=${omit_cri_resmgr:-0}
py_consts="${py_consts:-''}"
Expand Down Expand Up @@ -1170,16 +1176,20 @@ if [ -n "$vm_files" ]; then
install-files "$vm_files"
fi

if ! vm-command-q "type -p kubelet >/dev/null"; then
screen-install-k8s
if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ] || ! vm-command-q "( type -p containerd || type -p crio ) >/dev/null"; then
vm-install-cri
fi

if [ "$reinstall_runc" == "1" ]; then
# runc is installed as a dependency of containerd and crio.
# If reinstalling runc is explicitly wished for, it is safe to do
# only after (re)installing containerd/crio. Otherwise
# a custom locally built runc may be overridden from packages.
if [ "$reinstall_runc" == "1" ] || ! vm-command-q "type -p runc >/dev/null"; then
vm-install-runc
fi

if [ "$reinstall_containerd" == "1" ] || [ "$reinstall_crio" == "1" ]; then
vm-install-cri
if [ "$reinstall_k8s" == "1" ] || ! vm-command-q "type -p kubelet >/dev/null"; then
vm-install-k8s
fi

if [ "$reinstall_cri_resmgr" == "1" ]; then
Expand Down Expand Up @@ -1252,6 +1262,10 @@ if [ "$omit_cri_resmgr" != "1" ]; then
fi

# Create kubernetes cluster or wait that it is online
if [ "$reinstall_k8s" == "1" ]; then
vm-destroy-cluster
fi

if vm-command-q "[ ! -f /var/lib/kubelet/config.yaml ]"; then
screen-create-singlenode-cluster
else
Expand Down

0 comments on commit ff20134

Please sign in to comment.