From 629904c92e591ae4ee52c350ece544364b99d037 Mon Sep 17 00:00:00 2001
From: Antti Kervinen
Date: Mon, 10 Jul 2023 11:45:11 +0300
Subject: [PATCH] e2e: test topology-aware NUMA node change by AvailableResources

Test that a running container gets reassigned to new CPUs when the
CPUs it used to run on are no longer included in AvailableResources.

Tests issue #92.
---
 .../n4c16/test04-available-resources/code.var.sh | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/test/e2e/policies.test-suite/topology-aware/n4c16/test04-available-resources/code.var.sh b/test/e2e/policies.test-suite/topology-aware/n4c16/test04-available-resources/code.var.sh
index 81e017a35..d505a7623 100644
--- a/test/e2e/policies.test-suite/topology-aware/n4c16/test04-available-resources/code.var.sh
+++ b/test/e2e/policies.test-suite/topology-aware/n4c16/test04-available-resources/code.var.sh
@@ -17,6 +17,19 @@ verify "cpus['pod1c0'] == {'cpu08', 'cpu09', 'cpu10'}" \
        "cpus['pod1c1'] == {'cpu08', 'cpu09', 'cpu10'}" \
        "mems['pod1c0'] == {'node2'}" \
        "mems['pod1c1'] == {'node2'}"
+vm-command "kubectl delete pods pod1 --now"
+
+# Relaunch nri-resource-policy with a set of CPUs from a different socket
+# than the one where pod0 is currently running. Restoring pod0 to its
+# current CPUs will fail, so the workload should be moved to the new CPUs.
+terminate nri-resource-policy
+AVAILABLE_CPU="cpuset:0-1,11-14"
+nri_resource_policy_cfg=$(instantiate nri-resource-policy-available-resources.cfg)
+launch nri-resource-policy
+report allowed
+verify "cpus['pod0c0'] == {'cpu12', 'cpu13', 'cpu14'}" \
+       "mems['pod0c0'] == {'node3'}"
+
 vm-command "kubectl delete pods --all --now"
 reset counters
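
The verify lines above assert the pods' allowed CPUs and memory nodes through
the e2e framework's helpers. For a quick manual cross-check of the same
reassignment on the test VM, one option is to read the kernel's view from
inside pod0 directly; this is only a sketch and assumes pod0 is still running
in the default namespace and that its image ships grep:

# Spot-check pod0's CPU affinity; after the relaunch with
# AVAILABLE_CPU="cpuset:0-1,11-14" this should report CPUs from the new
# socket (e.g. 12-14) rather than the CPUs pod0 was originally assigned.
kubectl exec pod0 -- grep Cpus_allowed_list /proc/1/status

# The memory pinning should move along with the CPUs (e.g. to node 3).
kubectl exec pod0 -- grep Mems_allowed_list /proc/1/status

Both values come from /proc/1/status inside the container's PID namespace, so
they reflect the cpuset the runtime actually applied after nri-resource-policy
was relaunched with the reduced AvailableResources.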