From f5251692ac820a3de5d1f4adb7c8d36ec293d2b8 Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Wed, 27 Sep 2023 14:29:21 +0800 Subject: [PATCH] [Mellanox] Update QoS test cases for SN5600 (#9583) For Nvidia SN5600 the formula to calculate the shared buffer size for pg and queue differs from other Spectrum. So, update the corresponding code for it. --- .../qos/files/mellanox/qos_param_generator.py | 75 ++++++++++++++++--- .../qos/files/mellanox/special_qos_config.yml | 37 +++++++++ tests/qos/qos_sai_base.py | 69 ++++++++++++++++- 3 files changed, 169 insertions(+), 12 deletions(-) create mode 100644 tests/qos/files/mellanox/special_qos_config.yml diff --git a/tests/qos/files/mellanox/qos_param_generator.py b/tests/qos/files/mellanox/qos_param_generator.py index 30ca0f3fec..a671e317ee 100644 --- a/tests/qos/files/mellanox/qos_param_generator.py +++ b/tests/qos/files/mellanox/qos_param_generator.py @@ -1,4 +1,8 @@ import math +import yaml +import os + +MELLANOX_QOS_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "special_qos_config.yml") class QosParamMellanox(object): @@ -65,6 +69,7 @@ def run(self): """ self.collect_qos_configurations() self.calculate_parameters() + self.update_special_qos_config() return self.qos_params_mlnx def collect_qos_configurations(self): @@ -81,6 +86,10 @@ def collect_qos_configurations(self): headroom = xon + xoff ingress_lossless_size = int( math.ceil(float(self.ingressLosslessProfile['static_th']) / self.cell_size)) - xon + if self.asic_type == "spc4": + pg_q_alpha = self.ingressLosslessProfile['pg_q_alpha'] + port_alpha = self.ingressLosslessProfile['port_alpha'] + pool_size = int(math.ceil(float(self.ingressLosslessProfile['pool_size']) / self.cell_size)) else: headroom = size ingress_lossless_size = int( @@ -89,6 +98,8 @@ def collect_qos_configurations(self): egress_lossy_size = int(math.ceil(float(self.egressLossyProfile['static_th']) / self.cell_size)) + ingess_lossy_size = 
int(math.ceil(float(self.ingressLossyProfile['static_th']) / self.cell_size)) + pkts_num_trig_pfc = ingress_lossless_size + xon + hysteresis pkts_num_trig_ingr_drp = ingress_lossless_size + headroom if self.sharedHeadroomPoolSize: @@ -98,7 +109,8 @@ def collect_qos_configurations(self): else: pkts_num_trig_ingr_drp -= self.headroom_overhead pkts_num_dismiss_pfc = ingress_lossless_size + 1 - pkts_num_trig_egr_drp = egress_lossy_size + 1 + pkts_num_trig_egr_drp = egress_lossy_size + 1 if egress_lossy_size <= ingess_lossy_size \ + else ingess_lossy_size + 1 if self.sharedHeadroomPoolSize: src_testPortIds = self.dutConfig['testPortIds'][self.src_dut_index][self.src_asic_index] @@ -109,11 +121,25 @@ def collect_qos_configurations(self): occupancy_per_port = ingress_lossless_size self.qos_parameters['dst_port_id'] = dst_testPortIds[0] pgs_per_port = 2 if not self.dualTor else 4 - for i in range(1, ingress_ports_num_shp): - for j in range(pgs_per_port): - pkts_num_trig_pfc_shp.append(occupancy_per_port + xon + hysteresis) - occupancy_per_port /= 2 - ingress_ports_list_shp.append(src_testPortIds[i]) + occupied_buffer = 0 + if self.asic_type == "spc4": + for i in range(1, ingress_ports_num_shp): + for j in range(pgs_per_port): + pg_occupancy = int(math.ceil( + (pg_q_alpha*port_alpha*(pool_size - occupied_buffer) - pg_q_alpha*occupied_buffer)/( + 1 + pg_q_alpha*port_alpha + pg_q_alpha))) + pkts_num_trig_pfc_shp.append(pg_occupancy + xon + hysteresis) + occupied_buffer += pg_occupancy + # For a new port it should be treated as a smaller pool with the occupancy being 0 + pool_size -= occupied_buffer + occupied_buffer = 0 + ingress_ports_list_shp.append(src_testPortIds[i]) + else: + for i in range(1, ingress_ports_num_shp): + for j in range(pgs_per_port): + pkts_num_trig_pfc_shp.append(occupancy_per_port + xon + hysteresis) + occupancy_per_port //= 2 + ingress_ports_list_shp.append(src_testPortIds[i]) self.qos_parameters['pkts_num_trig_pfc_shp'] = pkts_num_trig_pfc_shp 
self.qos_parameters['src_port_ids'] = ingress_ports_list_shp self.qos_parameters['pkts_num_hdrm_full'] = xoff - 2 @@ -176,6 +202,7 @@ def calculate_parameters(self): xon['pkts_num_hysteresis'] = pkts_num_hysteresis + 16 xon['pkts_num_margin'] = 3 xon['cell_size'] = self.cell_size + self.qos_params_mlnx['xon_1'].update(xon) self.qos_params_mlnx['xon_2'].update(xon) self.qos_params_mlnx['xon_3'].update(xon) @@ -201,15 +228,11 @@ def calculate_parameters(self): lossy_queue = self.qos_params_mlnx['lossy_queue_1'] lossy_queue['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp - 1 lossy_queue['cell_size'] = self.cell_size - if self.asic_type == "spc4": - lossy_queue['packet_size'] = 600 wm_shared_lossy = {} wm_shared_lossy['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp wm_shared_lossy['cell_size'] = self.cell_size - wm_shared_lossy["pkts_num_margin"] = 3 - if self.asic_type == "spc4": - wm_shared_lossy["packet_size"] = 600 + wm_shared_lossy["pkts_num_margin"] = 4 self.qos_params_mlnx['wm_pg_shared_lossy'].update(wm_shared_lossy) wm_shared_lossy["pkts_num_margin"] = 8 self.qos_params_mlnx['wm_q_shared_lossy'].update(wm_shared_lossy) @@ -228,3 +251,33 @@ def calculate_parameters(self): self.qos_params_mlnx['shared-headroom-pool'] = self.sharedHeadroomPoolSize self.qos_params_mlnx['pkts_num_private_headrooom'] = self.asic_param_dic[self.asic_type]['private_headroom'] + + def update_special_qos_config(self): + """ + Update qos parameters based on the file of special_qos_config.yml + The format of special_qos_config.yml is the same as qos.yml, + and it just lists the parameters with different values for the specified asic_type + """ + with open(MELLANOX_QOS_CONFIG_FILE) as file: + special_qos_config_data = yaml.load(file, Loader=yaml.FullLoader) + + def update_dict_value(speical_qos_config_dict, qos_params_dict): + if speical_qos_config_dict: + for key, value in speical_qos_config_dict.items(): + if isinstance(value, dict): + update_dict_value(value, qos_params_dict[key]) + else: + 
qos_params_dict[key] = value + + special_qos_config = special_qos_config_data.get("qos_params").get(self.asic_type, {}) + if special_qos_config: + for qos_config_key, qos_config_value in special_qos_config.items(): + qos_params_dict = self.qos_params_mlnx[self.speed_cable_len] if qos_config_key == 'profile' \ + else self.qos_params_mlnx[qos_config_key] + + for sub_qos_config_key, sub_qos_config_value in qos_config_value.items(): + if isinstance(sub_qos_config_value, dict): + if sub_qos_config_key in qos_params_dict: + update_dict_value(sub_qos_config_value, qos_params_dict[sub_qos_config_key]) + else: + qos_params_dict[sub_qos_config_key] = sub_qos_config_value diff --git a/tests/qos/files/mellanox/special_qos_config.yml b/tests/qos/files/mellanox/special_qos_config.yml new file mode 100644 index 0000000000..2f1668cf89 --- /dev/null +++ b/tests/qos/files/mellanox/special_qos_config.yml @@ -0,0 +1,37 @@ +# Special qos config +# +qos_params: + spc4: + profile: + pkts_num_leak_out: 1 + xoff_1: + packet_size: 600 + xoff_2: + packet_size: 600 + xoff_3: + packet_size: 600 + xoff_4: + packet_size: 600 + wm_pg_headroom: + packet_size: 600 + wm_q_shared_lossless: + packet_size: 600 + hdrm_pool_size: + packet_size: 600 + xon_1: + packet_size: 600 + xon_2: + packet_size: 600 + xon_3: + packet_size: 600 + xon_4: + packet_size: 600 + lossy_queue_1: + packet_size: 600 + wm_pg_shared_lossless: + packet_size: 600 + pkts_num_margin: 7 + wm_pg_shared_lossy: + packet_size: 600 + wm_q_shared_lossy: + packet_size: 600 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index a1f988d53e..080459b706 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -179,6 +179,70 @@ def __computeBufferThreshold(self, dut_asic, bufferProfile): bufferProfile["size"]) + int(bufferScale * bufferSize)} ) + def __compute_buffer_threshold_for_nvidia_device(self, dut_asic, table, port, pg_q_buffer_profile): + """ + Computes buffer threshold for dynamic threshold profiles 
for nvidia device + + Args: + dut_asic (SonicAsic): Device ASIC Under Test (DUT) + table (str): Redis table name + port (str): DUT port alias + pg_q_buffer_profile (dict, inout): Map of pg or q buffer profile attributes + + Returns: + Updates bufferProfile with computed buffer threshold + """ + + port_table_name = "BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE" if \ + table == "BUFFER_QUEUE_TABLE" else "BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE" + db = "0" + port_profile_res = dut_asic.run_redis_cmd( + argv=["redis-cli", "-n", db, "HGET", f"{port_table_name}:{port}", "profile_list"] + )[0] + port_profile_list = port_profile_res.split(",") + + port_dynamic_th = '' + for port_profile in port_profile_list: + buffer_pool_name = dut_asic.run_redis_cmd( + argv=["redis-cli", "-n", db, "HGET", f'BUFFER_PROFILE_TABLE:{port_profile}', "pool"] + )[0] + if buffer_pool_name == pg_q_buffer_profile["pool"]: + port_dynamic_th = dut_asic.run_redis_cmd( + argv=["redis-cli", "-n", db, "HGET", f'BUFFER_PROFILE_TABLE:{port_profile}', "dynamic_th"] + )[0] + break + if port_dynamic_th: + + def calculate_alpha(dynamic_th): + if dynamic_th == "7": + alpha = 64 + else: + alpha = 2 ** float(dynamic_th) + return alpha + + pg_q_alpha = calculate_alpha(pg_q_buffer_profile['dynamic_th']) + port_alpha = calculate_alpha(port_dynamic_th) + pool = f'BUFFER_POOL_TABLE:{pg_q_buffer_profile["pool"]}' + buffer_size = int( + dut_asic.run_redis_cmd( + argv=["redis-cli", "-n", db, "HGET", pool, "size"] + )[0] + ) + + buffer_scale = port_alpha * pg_q_alpha / (port_alpha * pg_q_alpha + pg_q_alpha + 1) + + pg_q_max_occupancy = int(buffer_size * buffer_scale) + + pg_q_buffer_profile.update( + {"static_th": int( + pg_q_buffer_profile["size"]) + int(pg_q_max_occupancy)} + ) + pg_q_buffer_profile["pg_q_alpha"] = pg_q_alpha + pg_q_buffer_profile["port_alpha"] = port_alpha + pg_q_buffer_profile["pool_size"] = buffer_size + else: + raise Exception("Not found port dynamic th") + def __updateVoidRoidParams(self, dut_asic, 
bufferProfile): """ Updates buffer profile with VOID/ROID params @@ -272,7 +336,10 @@ def __getBufferProfile(self, request, dut_asic, os_version, table, port, priorit # Update profile static threshold value if profile threshold is dynamic if "dynamic_th" in list(bufferProfile.keys()): - self.__computeBufferThreshold(dut_asic, bufferProfile) + if dut_asic.sonichost.facts['platform'] == "x86_64-nvidia_sn5600-r0": + self.__compute_buffer_threshold_for_nvidia_device(dut_asic, table, port, bufferProfile) + else: + self.__computeBufferThreshold(dut_asic, bufferProfile) if "pg_lossless" in bufferProfileName: pytest_assert(