Skip to content

Commit

Permalink
tests/cpu-vm: Update VM CPU pinning tests to use `limits.cpu.pin_strategy` config option and corresponding API extension
Browse files Browse the repository at this point in the history

Signed-off-by: Kadin Sayani <kadin.sayani@canonical.com>
  • Loading branch information
kadinsayani committed Oct 1, 2024
1 parent 59f2a80 commit 00b31d1
Showing 1 changed file with 32 additions and 26 deletions.
58 changes: 32 additions & 26 deletions tests/cpu-vm
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,10 @@ if ! hasNeededAPIExtension cpu_hotplug; then
exit 0
fi

# required for "CPU auto pinning" feature check
# as we don't have a separate API extension for it
# and we rely on the debug output in the LXD daemon logs.
snap set lxd daemon.debug=true
systemctl restart snap.lxd.daemon.service
if ! hasNeededAPIExtension limits_cpu_pin_strategy; then
echo "Skipping test as CPU pinning not supported on ${LXD_SNAP_CHANNEL}"
exit 0
fi

# Configure LXD
lxc network create lxdbr0
Expand Down Expand Up @@ -52,8 +51,8 @@ fi

lxc delete v0

echo "==> Create ephemeral VM and boot"
lxc launch "${IMAGE}" v1 --vm -s "${poolName}" --ephemeral
echo "==> Create ephemeral VM with CPU pinning enabled and boot"
lxc launch "${IMAGE}" v1 --vm -c limits.cpu.pin_strategy=auto -s "${poolName}" --ephemeral
waitInstanceReady v1
lxc info v1

Expand Down Expand Up @@ -86,35 +85,42 @@ done
lxc config set v1 limits.cpu="${cpuCount}"
[ "$(lxc exec v1 -- ls /sys/devices/system/cpu | grep -xEc 'cpu[[:digit:]]+')" -eq "${cpuCount}" ]

# check that CPU affinity is automatically set if feature present
echo "==> Check that CPU affinity is automatically set when limits.cpu.pin_strategy=auto"
QEMU_PID=$(lxc info v1 | awk '/^PID:/ {print $2}')
if journalctl --quiet --no-hostname --no-pager --boot=0 --unit=snap.lxd.daemon.service | grep "Scheduler: virtual-machine"; then
# Check that there are processes with pinning set
# It will be shown like this (for limits.cpu=2):
# pid 2894's current affinity list: 6
# pid 2895's current affinity list: 8
# pid 2897's current affinity list: 0-15
# pid 2898's current affinity list: 0-15
# pid 2899's current affinity list: 0-15
# 2894 and 2895 have affinity set, while others don't
PINNED_THREADS_NUM=$(taskset --cpu-list -a -p "${QEMU_PID}" | grep -cE ':\s+[0-9]+$')
[ "${PINNED_THREADS_NUM}" -ge "$(lxc config get v1 limits.cpu)" ]
else
# check that there is no pinning set
! taskset --cpu-list -a -p "${QEMU_PID}" | grep -E ':\s+[0-9]+$' || false
taskset --cpu-list -a -p "${QEMU_PID}" | grep "0-$((cpuCount-1))"
fi
# Check that there are processes with pinning set
# It will be shown like this (for limits.cpu=2):
# pid 2894's current affinity list: 6
# pid 2895's current affinity list: 8
# pid 2897's current affinity list: 0-15
# pid 2898's current affinity list: 0-15
# pid 2899's current affinity list: 0-15
# 2894 and 2895 have affinity set, while others don't
PINNED_THREADS_NUM=$(taskset --cpu-list -a -p "${QEMU_PID}" | grep -cE ':\s+[0-9]+$')
[ "${PINNED_THREADS_NUM}" -ge "$(lxc config get v1 limits.cpu)" ]

# Unset CPU limit
lxc config unset v1 limits.cpu

# Unsetting the limit should leave the VM with 1 CPU
[ "$(lxc exec v1 -- ls /sys/devices/system/cpu | grep -xEc 'cpu[[:digit:]]+')" -eq "1" ]

echo "==> Stopping and deleting ephemeral VM"
# Stop VM and check its deleted.
echo "==> Create ephemeral VM with CPU pinning disabled and boot"
lxc launch "${IMAGE}" v2 --vm -c limits.cpu.pin_strategy=none -s "${poolName}" --ephemeral
waitInstanceReady v2
lxc info v2

QEMU_PID=$(lxc info v2 | awk '/^PID:/ {print $2}')

echo "==> Check that there is no CPU pinning set"
! taskset --cpu-list -a -p "${QEMU_PID}" | grep -E ':\s+[0-9]+$' || false
taskset --cpu-list -a -p "${QEMU_PID}" | grep "0-$((cpuCount-1))"

echo "==> Stopping and deleting ephemeral VMs"
# Stop VMs and ensure deleted.
lxc stop -f v1
! lxc info v1 || false
lxc stop -f v2
! lxc info v2 || false

lxc profile device remove default eth0
lxc profile unset default limits.kernel.nofile
Expand Down

0 comments on commit 00b31d1

Please sign in to comment.