From 4ed7e4a31a630e90f511add0d07e39f169cf615f Mon Sep 17 00:00:00 2001
From: liang-cong-red-hat
Date: Mon, 30 Dec 2024 04:44:20 -0500
Subject: [PATCH] Change fixed huge page size to default huge page size of host

Signed-off-by: liang-cong-red-hat
---
 .../tests/cfg/numa/numa_numanode_cpu_info.cfg |  5 ++-
 .../tests/src/numa/numa_numanode_cpu_info.py  | 44 +++++++++++--------
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg b/libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg
index 1f44f49407..4e9de5800f 100644
--- a/libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg
+++ b/libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg
@@ -6,5 +6,8 @@
     variants:
         - default:
             err_msg = 'unable to map backing store for guest RAM: Cannot allocate memory'
-            nodes_pages = ['900', '300']
+            current_memory_size = 4194304
+            memory_size = 4194304
+            nodes_memory = ['1572864', '524288']
             memory_mode = "strict"
+
diff --git a/libvirt/tests/src/numa/numa_numanode_cpu_info.py b/libvirt/tests/src/numa/numa_numanode_cpu_info.py
index 76ccdcc746..124b9557e9 100644
--- a/libvirt/tests/src/numa/numa_numanode_cpu_info.py
+++ b/libvirt/tests/src/numa/numa_numanode_cpu_info.py
@@ -7,6 +7,7 @@
 from virttest import utils_misc
 from virttest import utils_test
 from virttest import virsh
+from virttest.staging import utils_memory
 
 
 # Using as lower capital is not the best way to do, but this is just a
@@ -23,45 +24,51 @@ def update_xml(vm_name, online_nodes, params):
     """
     vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
     memory_mode = params.get("memory_mode")
+    memory_size = int(params.get("memory_size"))
+    current_memory_size = int(params.get("current_memory_size"))
     numa_memory = {'mode': memory_mode,
                    'nodeset': online_nodes[1]}
     vmxml.numa_memory = numa_memory
     mb_xml = libvirt_xml.vm_xml.VMMemBackingXML()
     mb_xml.hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
     vmxml.mb = mb_xml
+    vmxml.memory = memory_size
+    vmxml.current_mem = current_memory_size
     logging.debug("vm xml is %s", vmxml)
     vmxml.sync()
 
 
-def setup_host(required_node_num, online_nodes, pages_list, ori_page_set):
+def setup_host(required_node_num, online_nodes, memory_list, ori_page_set):
     """
     Setup host for test - update number of hugepages and check
 
     :param required_node_num: int, numa node number at least on the host required by the test
     :param online_nodes: List of all online nodes with memory available
-    :param pages_list: List of required number of pages for particular nodes
+    :param memory_list: List of required hugepage memory for particular nodes
     :param ori_page_set: A dict used to save original node page
     """
     index = 0
     if len(online_nodes) >= required_node_num:
-        for pages in pages_list:
+        hugepage_size = utils_memory.get_huge_page_size()
+        for memory_size in memory_list:
             ori_page_set[online_nodes[index]] = process.run(
-                'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(online_nodes[index]), shell=True).stdout_text.strip()
+                'cat /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(online_nodes[index], hugepage_size), shell=True).stdout_text.strip()
             logging.debug("ori_page_set is {}".format(ori_page_set))
+            pages = int(int(memory_size) / hugepage_size)
             ret = process.run(
-                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(pages, online_nodes[index]), shell=True)
+                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(pages, online_nodes[index], hugepage_size), shell=True)
             if ret.exit_status:
-                raise TestError('Cannot set {} hugepages on node {}'.
-                                format(pages, online_nodes[index]))
+                raise TestError('Cannot set {} pages for {}kB huge page on node {}'.
+                                format(pages, hugepage_size, online_nodes[index]))
             ret = process.run(
-                'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(online_nodes[index]), shell=True)
-            if pages not in ret.stdout_text:
-                raise TestError('Setting {} hugepages on node {} was unsuccessful'.
-                                format(pages, online_nodes[index]))
+                'cat /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(online_nodes[index], hugepage_size), shell=True)
+            if str(pages) not in ret.stdout_text:
+                raise TestError('Setting {} pages for {}kB huge page on node {} was unsuccessful'.
+                                format(pages, hugepage_size, online_nodes[index]))
             index += 1
     else:
         raise TestCancel("The test cannot continue since there is no enough "
@@ -75,13 +82,13 @@ def run(test, params, env):
     vm_name = params.get("main_vm")
     vm = env.get_vm(vm_name)
     error_message = params.get("err_msg")
-    pages_list = eval(params.get('nodes_pages'))
+    node_memory_list = eval(params.get('nodes_memory'))
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
     numa_info = utils_misc.NumaInfo()
     online_nodes = numa_info.get_online_nodes_withmem()
     ori_page_set = {}
     required_numa_node_num = int(params.get("numa_cells_with_memory_required", '2'))
-    setup_host(required_numa_node_num, online_nodes, pages_list, ori_page_set)
+    setup_host(required_numa_node_num, online_nodes, node_memory_list, ori_page_set)
     try:
         if vm.is_alive():
             vm.destroy()
@@ -94,8 +101,9 @@ def run(test, params, env):
     except Exception as e:
         test.error("Unexpected error: {}".format(e))
     finally:
+        hugepage_size = utils_memory.get_huge_page_size()
         for node_index, ori_page in ori_page_set.items():
             process.run(
-                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(ori_page, node_index), shell=True)
+                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(ori_page, node_index, hugepage_size), shell=True)
         backup_xml.sync()
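
Reviewer note (not part of the patch): the conversion above assumes that both the value returned by utils_memory.get_huge_page_size() and the nodes_memory entries are expressed in kB. A minimal standalone sketch of that computation follows; read_default_hugepage_size_kb is a hypothetical stand-in written only for illustration, on the assumption that get_huge_page_size() parses the Hugepagesize field of /proc/meminfo.

def read_default_hugepage_size_kb():
    # Hypothetical stand-in for utils_memory.get_huge_page_size():
    # assumes the host's default hugepage size is the Hugepagesize
    # field of /proc/meminfo, reported in kB.
    with open('/proc/meminfo') as meminfo:
        for line in meminfo:
            if line.startswith('Hugepagesize:'):
                return int(line.split()[1])  # e.g. 2048 on x86_64
    raise RuntimeError('Hugepagesize not found in /proc/meminfo')


def pages_for_memory(memory_kb, hugepage_size_kb):
    # Same arithmetic as setup_host(): number of hugepages needed to
    # back memory_kb of guest RAM with pages of hugepage_size_kb.
    return int(int(memory_kb) / hugepage_size_kb)


if __name__ == '__main__':
    size_kb = read_default_hugepage_size_kb()
    for mem in ['1572864', '524288']:  # nodes_memory values from the cfg
        print('{} kB -> {} pages of {} kB'.format(
            mem, pages_for_memory(mem, size_kb), size_kb))

On a host with 2048 kB hugepages this yields 768 and 256 pages, matching the old fixed nodes_pages values scaled up to the new memory sizes; on hosts with a different default (e.g. 16384 kB on some ppc64le machines) the page counts adjust automatically, which is the point of the patch.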