libvirt: support s390x cluster
- create an s390x cluster with libvirt
- show e2e test results for the s390x libvirt cluster

Signed-off-by: Da Li Liu <liudali@cn.ibm.com>
Da Li Liu committed Nov 23, 2023
1 parent 6713f65 commit 6a46210
Showing 3 changed files with 50 additions and 14 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/daily-e2e-tests-ibmcloud.yaml
@@ -22,6 +22,7 @@ jobs:
 include:
 - type: e2e_amd64
 - type: libvirt_amd64
+- type: libvirt_s390x
 - type: s390x-non-secure-execution
 - type: s390x-secure-execution
 - type: csi_wrapper_x86_64
@@ -81,7 +82,7 @@ jobs:
echo "The built podvm image is based on CAA commit_id: ${caa_commit_id}"
arch_string=""
podvm_docker_name=""
if [[ "${{matrix.type}}" != "csi_wrapper_x86_64" ]] && [[ "${{matrix.type}}" != "libvirt_amd64" ]]; then
if [[ "${{matrix.type}}" != "csi_wrapper_x86_64" ]] && [[ "${{matrix.type}}" != "libvirt_amd64" ]] && [[ "${{matrix.type}}" != "libvirt_s390x" ]]; then
case "${{matrix.type}}" in
e2e_amd64)
arch_string="amd64"
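For readers skimming the workflow change: the extended guard means the libvirt matrix entries, now including `libvirt_s390x`, skip the podvm image arch/name derivation that the other job types perform. A minimal standalone sketch of that branch, not the workflow itself; `MATRIX_TYPE` stands in for `${{matrix.type}}`, and only the `e2e_amd64` case visible in the hunk is reproduced:

```bash
#!/usr/bin/env bash
# Illustrative sketch only: MATRIX_TYPE stands in for the workflow's ${{matrix.type}},
# and the case body is trimmed to the single branch visible in the hunk above.
MATRIX_TYPE="${1:-libvirt_s390x}"
arch_string=""

if [[ "$MATRIX_TYPE" != "csi_wrapper_x86_64" ]] && \
   [[ "$MATRIX_TYPE" != "libvirt_amd64" ]] && \
   [[ "$MATRIX_TYPE" != "libvirt_s390x" ]]; then
    case "$MATRIX_TYPE" in
        e2e_amd64)
            arch_string="amd64"
            ;;
    esac
    echo "podvm image arch: ${arch_string}"
else
    echo "${MATRIX_TYPE}: podvm image arch lookup is skipped"
fi
```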
11 changes: 9 additions & 2 deletions libvirt/README.md
@@ -19,6 +19,13 @@ You must have a Linux/KVM system with libvirt installed and the following tools:
 Assume that you have a 'default' network and storage pools created in libvirtd system instance (`qemu:///system`). However,
 if you have a different pool name then the scripts should be able to handle it properly.
 
+**Note:** On s390x machines, installation of [kcli](https://kcli.readthedocs.io/en/latest/#dev-installation) is supported only via PyPI. The following commands install it on an Ubuntu 20.04 s390x VSI:
+```bash
+apt-get update
+apt-get -y install python3-pip genisoimage qemu-kvm libvirt-daemon-system libvirt-dev cpu-checker
+pip3 install kcli
+```
+
 ## Create the Kubernetes cluster
 
 Use the [`kcli_cluster.sh`](./kcli_cluster.sh) script to create a simple two VMs (one control plane and one worker) cluster
@@ -46,8 +53,8 @@ $ kcli list kube
 +-----------+---------+-----------+-----------------------------------------+
 $ kubectl get nodes
 NAME                   STATUS   ROLES                  AGE     VERSION
-peer-pods-ctlplane-0   Ready    control-plane,master   6m8s    v1.25.3
-peer-pods-worker-0     Ready    worker                 2m47s   v1.25.3
+peer-pods-ctlplane-0   Ready    control-plane,master   6m8s    v1.26.7
+peer-pods-worker-0     Ready    worker                 2m47s   v1.26.7
 ```
 
 ## Prepare the Pod VM volume
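Since the new README note has readers install kcli from PyPI on a bare Ubuntu host, a quick post-install sanity check can save a failed cluster create later. These are standard cpu-checker/libvirt/kcli commands offered as a suggestion, not something this commit adds:

```bash
# Suggested sanity checks after the PyPI-based kcli install (not part of this commit).
kvm-ok                              # from cpu-checker: verifies /dev/kvm can be used
virsh -c qemu:///system pool-list   # storage pools visible to the system libvirt instance
kcli version                        # confirms the pip-installed kcli is on PATH
```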
50 changes: 39 additions & 11 deletions libvirt/kcli_cluster.sh
@@ -41,28 +41,50 @@ wait_for_process() {
 # Create the cluster.
 #
 create () {
-    kcli create kube generic \
-        -P domain="kata.com" \
-        -P pool="$LIBVIRT_POOL" \
-        -P ctlplanes="$CLUSTER_CONTROL_NODES" \
-        -P workers="$CLUSTER_WORKERS" \
-        -P network="$LIBVIRT_NETWORK" \
-        -P image="$CLUSTER_IMAGE" \
+    parameters="-P domain=kata.com \
+        -P pool=$LIBVIRT_POOL \
+        -P ctlplanes=$CLUSTER_CONTROL_NODES \
+        -P workers=$CLUSTER_WORKERS \
+        -P network=$LIBVIRT_NETWORK \
+        -P image=$CLUSTER_IMAGE \
         -P sdn=flannel \
         -P nfs=false \
-        -P disk_size="$CLUSTER_DISK_SIZE" \
-        -P version="$CLUSTER_VERSION" \
-        "$CLUSTER_NAME"
+        -P disk_size=$CLUSTER_DISK_SIZE \
+        -P version=$CLUSTER_VERSION"
+    # The autolabeller and multus images do not support the s390x arch yet,
+    # so disable them for the s390x cluster.
+    if [[ ${ARCH} == "s390x" ]]; then
+        parameters="$parameters \
+            -P arch=$ARCH \
+            -P multus=false \
+            -P autolabeller=false "
+    fi
+    # Create the default storage pool if it is not defined yet.
+    if ! virsh pool-list --all | grep default >/dev/null; then
+        virsh pool-define-as default dir - - - - "/var/lib/libvirt/images"
+        virsh pool-build default
+        virsh pool-start default
+    fi
+    # kcli supports downloading images for these archs: 'x86_64', 'aarch64', 'ppc64le', 's390x'
+    kcli download image $CLUSTER_IMAGE -a ${ARCH/amd64/x86_64}
+    kcli create kube generic $parameters "$CLUSTER_NAME"
 
     export KUBECONFIG=$HOME/.kcli/clusters/$CLUSTER_NAME/auth/kubeconfig
 
-    local cmd="kubectl get nodes | grep '\<Ready\>.*worker'"
+    # The autolabeller docker image does not support the s390x arch yet;
+    # wait for one worker node (matched by name) to be 'Ready', then label the worker nodes.
+    local cmd="kubectl get nodes --no-headers | grep 'worker-.* Ready'"
     echo "Wait at least one worker be Ready"
     if ! wait_for_process "330" "30" "$cmd"; then
         echo "ERROR: worker nodes not ready."
         kubectl get nodes
         exit 1
     fi
+    workers=$(kubectl get nodes -o name --no-headers | grep 'worker')
+    for worker in $workers; do
+        kubectl label "$worker" node.kubernetes.io/worker=
+        kubectl label "$worker" node-role.kubernetes.io/worker=
+    done
 
     # Ensure that system pods are running or completed.
     cmd="[ \$(kubectl get pods -A --no-headers | grep -v 'Running\|Completed' | wc -l) -eq 0 ]"
@@ -113,6 +135,12 @@ main() {
echo "ERROR: kcli command is required. See https://kcli.readthedocs.io/en/latest/#installation"
exit 1
fi
ARCH=$(uname -m)
export ARCH=${ARCH/x86_64/amd64}
if ! command -v kubectl >/dev/null; then
curl -fL --progress-bar -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$ARCH/kubectl" && \
chmod +x /usr/bin/kubectl
fi

kcli_version="$(kcli version | awk '{ print $2}')"
if [ "${kcli_version/.*/}" -lt "${kcli_version_min/.*/}" ];then
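Taken together, the script changes make `create()` usable on s390x: `main()` now detects `ARCH` and installs `kubectl` if it is missing, while `create()` downloads the cluster image for the detected arch, disables multus/autolabeller on s390x, and labels the worker nodes itself. A sketch of how a run might look on an s390x KVM host, assuming the script's `create` sub-command and that the variables shown in the hunk (`CLUSTER_NAME`, `CLUSTER_WORKERS`, `LIBVIRT_POOL`, `LIBVIRT_NETWORK`) can be overridden from the environment:

```bash
# Hypothetical run on an s390x KVM host; the environment overrides below are an
# assumption, the hunks only show the variables being read, not how they are set.
export CLUSTER_NAME="peer-pods"
export CLUSTER_WORKERS="1"
export LIBVIRT_POOL="default"
export LIBVIRT_NETWORK="default"

./kcli_cluster.sh create            # ARCH is detected inside main() via uname -m

export KUBECONFIG="$HOME/.kcli/clusters/$CLUSTER_NAME/auth/kubeconfig"
kubectl get nodes --show-labels     # workers should now carry node-role.kubernetes.io/worker=
```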
