From c0e086209281194e2118904e89add4935a2cf014 Mon Sep 17 00:00:00 2001 From: Ashok_daparthi Date: Thu, 20 May 2021 01:47:15 +0000 Subject: [PATCH] QOS field reference ABNF format to string changes --- cfgmgr/buffer_pool_mellanox.lua | 2 +- cfgmgr/buffermgr.cpp | 23 ++------ cfgmgr/buffermgrdyn.cpp | 27 ++------- doc/Configuration.md | 42 +++++++------- doc/swss-schema.md | 8 +-- orchagent/bufferorch.cpp | 30 +++++++--- orchagent/orch.cpp | 55 +++++++----------- orchagent/orch.h | 6 +- orchagent/qosorch.cpp | 25 ++++++-- swssconfig/sample/sample.json | 10 ++-- swssconfig/sample/sample.json.output.txt | 8 +-- tests/buffer_model.py | 2 +- tests/mock_tests/portsorch_ut.cpp | 14 ++--- tests/test_buffer_dynamic.py | 74 ++++++++++++------------ tests/test_qos_map.py | 2 +- tests/test_speed.py | 2 +- 16 files changed, 157 insertions(+), 173 deletions(-) diff --git a/cfgmgr/buffer_pool_mellanox.lua b/cfgmgr/buffer_pool_mellanox.lua index f27316d2b7e..74552355dd9 100644 --- a/cfgmgr/buffer_pool_mellanox.lua +++ b/cfgmgr/buffer_pool_mellanox.lua @@ -60,7 +60,7 @@ local function iterate_all_items(all_items) profiles[index][2] = profiles[index][2] + size local speed = redis.call('HGET', 'PORT_TABLE:'..port, 'speed') if speed == '400000' then - if profile == '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' then + if profile == 'ingress_lossy_profile' then lossypg_400g = lossypg_400g + size end port_count_400g = port_count_400g + 1 diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 7b1b73faffb..9e2c1ec8781 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -106,7 +106,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( "BUFFER_PROFILE": { "pg_lossless_100G_300m_profile": { - "pool":"[BUFFER_POOL_TABLE:ingress_lossless_pool]", + "pool":"ingress_lossless_pool", "xon":"18432", "xon_offset":"2496", "xoff":"165888", @@ -116,7 +116,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } "BUFFER_PG" :{ Ethernet44|3-4": { - "profile" : "[BUFFER_PROFILE:pg_lossless_100000_300m_profile]" + "profile" : "pg_lossless_100000_300m_profile" } } */ @@ -160,11 +160,8 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, string speed) // profile threshold field name mode += "_th"; - string pg_pool_reference = string(CFG_BUFFER_POOL_TABLE_NAME) + - m_cfgBufferProfileTable.getTableNameSeparator() + - INGRESS_LOSSLESS_PG_POOL_NAME; - fvVector.push_back(make_pair("pool", "[" + pg_pool_reference + "]")); + fvVector.push_back(make_pair("pool", INGRESS_LOSSLESS_PG_POOL_NAME)); fvVector.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { fvVector.push_back(make_pair("xon_offset", @@ -184,11 +181,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, string speed) string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS; - string profile_ref = string("[") + - CFG_BUFFER_PROFILE_TABLE_NAME + - m_cfgBufferPgTable.getTableNameSeparator() + - buffer_profile_key + - "]"; + string profile_ref = buffer_profile_key; /* Check if PG Mapping is already then log message and return. 
*/ m_cfgBufferPgTable.get(buffer_pg_key, fvVector); @@ -284,14 +277,6 @@ void BufferMgr::doBufferTableTask(Consumer &consumer, ProducerStateTable &applTa for (auto i : kfvFieldsValues(t)) { - SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); - //transform the separator in values from "|" to ":" - if (fvField(i) == "pool") - transformReference(fvValue(i)); - if (fvField(i) == "profile") - transformReference(fvValue(i)); - if (fvField(i) == "profile_list") - transformReference(fvValue(i)); fvVector.emplace_back(FieldValueTuple(fvField(i), fvValue(i))); SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 4dd69840992..9159dc3b856 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -526,9 +526,6 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ // profile threshold field name mode += "_th"; - string pg_pool_reference = string(APP_BUFFER_POOL_TABLE_NAME) + - m_applBufferProfileTable.getTableNameSeparator() + - INGRESS_LOSSLESS_PG_POOL_NAME; fvVector.emplace_back("xon", profile.xon); if (!profile.xon_offset.empty()) { @@ -536,7 +533,7 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ } fvVector.emplace_back("xoff", profile.xoff); fvVector.emplace_back("size", profile.size); - fvVector.emplace_back("pool", "[" + pg_pool_reference + "]"); + fvVector.emplace_back("pool", INGRESS_LOSSLESS_PG_POOL_NAME); fvVector.emplace_back(mode, profile.threshold); m_applBufferProfileTable.set(name, fvVector); @@ -553,15 +550,7 @@ void BufferMgrDynamic::updateBufferPgToDb(const string &key, const string &profi fvVector.clear(); - string profile_ref = string("[") + - APP_BUFFER_PROFILE_TABLE_NAME + - m_applBufferPgTable.getTableNameSeparator() + - profile + - "]"; - - fvVector.clear(); - - fvVector.push_back(make_pair("profile", profile_ref)); + fvVector.push_back(make_pair("profile", profile)); m_applBufferPgTable.set(key, fvVector); } else @@ -1632,8 +1621,7 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues { if (!value.empty()) { - transformReference(value); - auto poolName = parseObjectNameFromReference(value); + auto poolName = value; if (poolName.empty()) { SWSS_LOG_ERROR("BUFFER_PROFILE: Invalid format of reference to pool: %s", value.c_str()); @@ -1806,8 +1794,7 @@ task_process_status BufferMgrDynamic::handleOneBufferPgEntry(const string &key, { // Headroom override pureDynamic = false; - transformReference(value); - string profileName = parseObjectNameFromReference(value); + string profileName = value; if (profileName.empty()) { SWSS_LOG_ERROR("BUFFER_PG: Invalid format of reference to profile: %s", value.c_str()); @@ -2023,12 +2010,6 @@ task_process_status BufferMgrDynamic::doBufferTableTask(KeyOpFieldsValuesTuple & for (auto i : kfvFieldsValues(tuple)) { // Transform the separator in values from "|" to ":" - if (fvField(i) == "pool") - transformReference(fvValue(i)); - if (fvField(i) == "profile") - transformReference(fvValue(i)); - if (fvField(i) == "profile_list") - transformReference(fvValue(i)); fvVector.emplace_back(fvField(i), fvValue(i)); SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } diff --git a/doc/Configuration.md b/doc/Configuration.md index fcec60be43a..a619531f70f 100644 --- a/doc/Configuration.md +++ b/doc/Configuration.md @@ -343,13 +343,13 @@ When the system is running in traditional buffer 
model, profiles needs to explic { "BUFFER_PG": { "Ethernet0|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" }, "Ethernet1|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" }, "Ethernet2|3-4": { - "profile": "[BUFFER_PROFILE|pg_lossless_40000_5m_profile]" + "profile": "pg_lossless_40000_5m_profile" } } } @@ -371,7 +371,7 @@ When the system is running in dynamic buffer model, profiles can be: "profile": "NULL" }, "Ethernet2|3-4": { - "profile": "[BUFFER_PROFILE|static_profile]" + "profile": "static_profile" } } } @@ -437,17 +437,17 @@ When the system is running in dynamic buffer model, the size of some of the buff "BUFFER_PROFILE": { "egress_lossless_profile": { "static_th": "3995680", - "pool": "[BUFFER_POOL|egress_lossless_pool]", + "pool": "egress_lossless_pool", "size": "1518" }, "egress_lossy_profile": { "dynamic_th": "3", - "pool": "[BUFFER_POOL|egress_lossy_pool]", + "pool": "egress_lossy_pool", "size": "1518" }, "ingress_lossy_profile": { "dynamic_th": "3", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "0" }, "pg_lossless_40000_5m_profile": { @@ -455,7 +455,7 @@ When the system is running in dynamic buffer model, the size of some of the buff "dynamic_th": "-3", "xon": "2288", "xoff": "66560", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "1248" }, "pg_lossless_40000_40m_profile": { @@ -463,7 +463,7 @@ When the system is running in dynamic buffer model, the size of some of the buff "dynamic_th": "-3", "xon": "2288", "xoff": "71552", - "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "pool": "ingress_lossless_pool", "size": "1248" } } @@ -491,13 +491,13 @@ This kind of profiles will be handled by buffer manager and won't be applied to { "BUFFER_QUEUE": { "Ethernet50,Ethernet52,Ethernet54,Ethernet56|0-2": { - "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + "profile": "egress_lossy_profile" }, "Ethernet50,Ethernet52,Ethernet54,Ethernet56|3-4": { - "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + "profile": "egress_lossless_profile" }, "Ethernet50,Ethernet52,Ethernet54,Ethernet56|5-6": { - "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + "profile": "egress_lossy_profile" } } } @@ -1104,12 +1104,12 @@ name as object key and member list as attribute. { "PORT_QOS_MAP": { "Ethernet50,Ethernet52,Ethernet54,Ethernet56": { - "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE", "pfc_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", - "scheduler": "[SCHEDULER|scheduler.port]" + "pfc_to_queue_map": "AZURE", + "dscp_to_tc_map": "AZURE", + "scheduler": "scheduler.port" } } } @@ -1120,14 +1120,14 @@ name as object key and member list as attribute. 
{ "QUEUE": { "Ethernet56|4": { - "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", - "scheduler": "[SCHEDULER|scheduler.1]" + "wred_profile": "AZURE_LOSSLESS", + "scheduler": "scheduler.1" }, "Ethernet56|5": { - "scheduler": "[SCHEDULER|scheduler.0]" + "scheduler": "scheduler.0" }, "Ethernet56|6": { - "scheduler": "[SCHEDULER|scheduler.0]" + "scheduler": "scheduler.0" } } } diff --git a/doc/swss-schema.md b/doc/swss-schema.md index 7f25803a287..3ccc7b74afe 100644 --- a/doc/swss-schema.md +++ b/doc/swss-schema.md @@ -35,9 +35,9 @@ Stores information for physical switch ports managed by the switch chip. Ports t Example: 127.0.0.1:6379> hgetall PORT_TABLE:ETHERNET4 1) "dscp_to_tc_map" - 2) "[DSCP_TO_TC_MAP_TABLE:AZURE]" + 2) "AZURE" 3) "tc_to_queue_map" - 4) "[TC_TO_QUEUE_MAP_TABLE:AZURE]" + 4) "AZURE" --------------------------------------------- ### INTF_TABLE @@ -209,9 +209,9 @@ and reflects the LAG ports into the redis under: `LAG_TABLE::port` Example: 127.0.0.1:6379> hgetall QUEUE_TABLE:ETHERNET4:1 1) "scheduler" - 2) "[SCHEDULER_TABLE:BEST_EFFORT]" + 2) "BEST_EFFORT" 3) "wred_profile" - 4) "[WRED_PROFILE_TABLE:AZURE]" + 4) "AZURE" --------------------------------------------- ### TC\_TO\_QUEUE\_MAP\_TABLE diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 59fcce590ee..3195c950790 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -36,6 +36,12 @@ type_map BufferOrch::m_buffer_type_maps = { {APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, new object_reference_map()} }; +map buffer_to_ref_table_map = { + {buffer_pool_field_name, APP_BUFFER_POOL_TABLE_NAME}, + {buffer_profile_field_name, APP_BUFFER_PROFILE_TABLE_NAME}, + {buffer_profile_list_field_name, APP_BUFFER_PROFILE_TABLE_NAME} +}; + BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *stateDb, vector &tableNames) : Orch(applDb, tableNames), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), @@ -512,7 +518,9 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } sai_object_id_t sai_pool; - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, tuple, sai_pool, pool_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, + buffer_to_ref_table_map.at(buffer_pool_field_name), + tuple, sai_pool, pool_name); if (ref_resolve_status::success != resolve_result) { if(ref_resolve_status::not_resolved == resolve_result) @@ -701,7 +709,9 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) if (op == SET_COMMAND) { - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, tuple, sai_buffer_profile, buffer_profile_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { if (ref_resolve_status::not_resolved == resolve_result) @@ -824,7 +834,9 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { - ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, tuple, sai_buffer_profile, buffer_profile_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + 
sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { if (ref_resolve_status::not_resolved == resolve_result) @@ -928,7 +940,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } /* -Input sample:"[BUFFER_PROFILE_TABLE:i_port.profile0],[BUFFER_PROFILE_TABLE:i_port.profile1]" +Input sample:"i_port.profile0,i_port.profile1" */ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValuesTuple &tuple) { @@ -943,7 +955,9 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue vector profile_list; string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, tuple, profile_list, profile_name_list); + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); if (ref_resolve_status::success != resolve_status) { if(ref_resolve_status::not_resolved == resolve_status) @@ -984,7 +998,7 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue } /* -Input sample:"[BUFFER_PROFILE_TABLE:e_port.profile0],[BUFFER_PROFILE_TABLE:e_port.profile1]" +Input sample:"e_port.profile0,e_port.profile1" */ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValuesTuple &tuple) { @@ -997,7 +1011,9 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues vector profile_list; string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, tuple, profile_list, profile_name_list); + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); if (ref_resolve_status::success != resolve_status) { if(ref_resolve_status::not_resolved == resolve_status) diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 464e6aa6888..622c0724419 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -301,7 +301,7 @@ bool Orch::bake() } /* -- Validates reference has proper format which is [table_name:object_name] +- Validates reference has proper format which is not ABNF [table_name:object_name] - validates table_name exists - validates object with object_name exists @@ -310,59 +310,42 @@ bool Orch::bake() - both type_name and object_name are cleared to empty strings as an - indication to the caller of the special case */ -bool Orch::parseReference(type_map &type_maps, string &ref_in, string &type_name, string &object_name) +bool Orch::parseReference(type_map &type_maps, string &ref_in, const string &type_name, string &object_name) { SWSS_LOG_ENTER(); SWSS_LOG_DEBUG("input:%s", ref_in.c_str()); - if (ref_in.size() < 2) - { - SWSS_LOG_ERROR("invalid reference received:%s\n", ref_in.c_str()); - return false; - } - if ((ref_in[0] != ref_start) || (ref_in[ref_in.size()-1] != ref_end)) - { - SWSS_LOG_ERROR("malformed reference:%s. 
Must be surrounded by [ ]\n", ref_in.c_str()); - return false; - } - if (ref_in.size() == 2) + + if (ref_in.size() == 0) { - // value set by user is "[]" + // value set by user is "" // Deem it as a valid format // clear both type_name and object_name // as an indication to the caller that // such a case has been encountered - type_name.clear(); object_name.clear(); return true; } - string ref_content = ref_in.substr(1, ref_in.size() - 2); - vector tokens; - tokens = tokenize(ref_content, delimiter); - if (tokens.size() != 2) + + if ((ref_in[0] == ref_start) || (ref_in[ref_in.size()-1] == ref_end)) { - tokens = tokenize(ref_content, config_db_key_delimiter); - if (tokens.size() != 2) - { - SWSS_LOG_ERROR("malformed reference:%s. Must contain 2 tokens\n", ref_content.c_str()); - return false; - } + SWSS_LOG_ERROR("malformed reference:%s. Must not be surrounded by [ ]\n", ref_in.c_str()); + return false; } - auto type_it = type_maps.find(tokens[0]); + auto type_it = type_maps.find(type_name); if (type_it == type_maps.end()) { - SWSS_LOG_ERROR("not recognized type:%s\n", tokens[0].c_str()); + SWSS_LOG_ERROR("not recognized type:%s\n", type_name.c_str()); return false; } - auto obj_map = type_maps[tokens[0]]; - auto obj_it = obj_map->find(tokens[1]); + auto obj_map = type_maps[type_name]; + auto obj_it = obj_map->find(ref_in); if (obj_it == obj_map->end()) { - SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", tokens[0].c_str(), tokens[1].c_str()); + SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", type_name.c_str(), ref_in.c_str()); return false; } - type_name = tokens[0]; - object_name = tokens[1]; + object_name = ref_in; SWSS_LOG_DEBUG("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str()); return true; } @@ -370,6 +353,7 @@ bool Orch::parseReference(type_map &type_maps, string &ref_in, string &type_name ref_resolve_status Orch::resolveFieldRefValue( type_map &type_maps, const string &field_name, + const string &ref_type_name, KeyOpFieldsValuesTuple &tuple, sai_object_id_t &sai_object, string &referenced_object_name) @@ -387,7 +371,7 @@ ref_resolve_status Orch::resolveFieldRefValue( SWSS_LOG_ERROR("Multiple same fields %s", field_name.c_str()); return ref_resolve_status::multiple_instances; } - string ref_type_name, object_name; + string object_name; if (!parseReference(type_maps, fvValue(*i), ref_type_name, object_name)) { return ref_resolve_status::not_resolved; @@ -568,11 +552,12 @@ string Orch::dumpTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) ref_resolve_status Orch::resolveFieldRefArray( type_map &type_maps, const string &field_name, + const string &ref_type_name, KeyOpFieldsValuesTuple &tuple, vector &sai_object_arr, string &object_name_list) { - // example: [BUFFER_PROFILE_TABLE:e_port.profile0],[BUFFER_PROFILE_TABLE:e_port.profile1] + // example: e_port.profile0,e_port.profile1 SWSS_LOG_ENTER(); size_t count = 0; sai_object_arr.clear(); @@ -585,7 +570,7 @@ ref_resolve_status Orch::resolveFieldRefArray( SWSS_LOG_ERROR("Singleton field with name:%s must have only 1 instance, actual count:%zd\n", field_name.c_str(), count); return ref_resolve_status::multiple_instances; } - string ref_type_name, object_name; + string object_name; string list = fvValue(*i); vector list_items; if (list.find(list_item_delimiter) != string::npos) diff --git a/orchagent/orch.h b/orchagent/orch.h index b61cdb53e2b..0cb567718ba 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -223,10 +223,10 @@ class Orch static void logfileReopen(); 
std::string dumpTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); - ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); + ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); bool parseIndexRange(const std::string &input, sai_uint32_t &range_low, sai_uint32_t &range_high); - bool parseReference(type_map &type_maps, std::string &ref, std::string &table_name, std::string &object_name); - ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector&, std::string&); + bool parseReference(type_map &type_maps, std::string &ref, const std::string &table_name, std::string &object_name); + ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, std::vector&, std::string&); void setObjectReference(type_map&, const std::string&, const std::string&, const std::string&, const std::string&); void removeObject(type_map&, const std::string&, const std::string&); bool isObjectBeingReferenced(type_map&, const std::string&, const std::string&); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 31e61b5433e..47351f8d7ff 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -71,6 +71,18 @@ type_map QosOrch::m_qos_maps = { {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()} }; +map qos_to_ref_table_map = { + {dscp_to_tc_field_name, CFG_DSCP_TO_TC_MAP_TABLE_NAME}, + {dot1p_to_tc_field_name, CFG_DOT1P_TO_TC_MAP_TABLE_NAME}, + {tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME}, + {tc_to_pg_map_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {pfc_to_pg_map_name, CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {pfc_to_queue_map_name, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME}, + {scheduler_field_name, CFG_SCHEDULER_TABLE_NAME}, + {wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME} +}; + + task_process_status QosMapHandler::processWorkItem(Consumer& consumer) { SWSS_LOG_ENTER(); @@ -1122,7 +1134,9 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) SWSS_LOG_DEBUG("processing queue:%zd", queue_ind); sai_object_id_t sai_scheduler_profile; string scheduler_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, tuple, sai_scheduler_profile, scheduler_profile_name); + resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, + qos_to_ref_table_map.at(scheduler_field_name), tuple, + sai_scheduler_profile, scheduler_profile_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1159,7 +1173,9 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) sai_object_id_t sai_wred_profile; string wred_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, tuple, sai_wred_profile, wred_profile_name); + resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, + qos_to_ref_table_map.at(wred_profile_field_name), tuple, + sai_wred_profile, wred_profile_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1247,7 +1263,8 @@ task_process_status QosOrch::ResolveMapAndApplyToPort( sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; string object_name; bool result; - ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, tuple, sai_object, 
object_name); + ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, + qos_to_ref_table_map.at(field_name), tuple, sai_object, object_name); if (ref_resolve_status::success == resolve_result) { if (op == SET_COMMAND) @@ -1303,7 +1320,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) sai_object_id_t id; string object_name; string map_type_name = fvField(*it), map_name = fvValue(*it); - ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, tuple, id, object_name); + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status != ref_resolve_status::success) { diff --git a/swssconfig/sample/sample.json b/swssconfig/sample/sample.json index 7fb0ff0ac2b..c00cdb0b9f0 100644 --- a/swssconfig/sample/sample.json +++ b/swssconfig/sample/sample.json @@ -18,8 +18,8 @@ }, { "QOS_TABLE:PORT_TABLE:ETHERNET4": { - "dscp_to_tc_map" : "[DSCP_TO_TC_MAP_TABLE:AZURE]", - "tc_to_queue_map": "[TC_TO_QUEUE_MAP_TABLE:AZURE]" + "dscp_to_tc_map" : "AZURE", + "tc_to_queue_map": "AZURE" }, "OP": "SET" }, @@ -46,9 +46,9 @@ }, { "QUEUE_TABLE:ETHERNET4:1" : { - "scheduler" : "[SCHEDULER_TABLE:BEST_EFFORT]", - "wred_profile" : "[WRED_PROFILE_TABLE:AZURE]" + "scheduler" : "BEST_EFFORT", + "wred_profile" : "AZURE" }, "OP": "SET" } - ] \ No newline at end of file + ] diff --git a/swssconfig/sample/sample.json.output.txt b/swssconfig/sample/sample.json.output.txt index 8508de60c6f..11f4203771c 100644 --- a/swssconfig/sample/sample.json.output.txt +++ b/swssconfig/sample/sample.json.output.txt @@ -67,14 +67,14 @@ hgetall WRED_PROFILE_TABLE:AZURE 10) "8" 127.0.0.1:6379> hgetall QUEUE_TABLE:ETHERNET4:1 1) "scheduler" -2) "[SCHEDULER_TABLE:BEST_EFFORT]" +2) "BEST_EFFORT" 3) "wred_profile" -4) "[WRED_PROFILE_TABLE:AZURE]" +4) "AZURE" 127.0.0.1:6379> hgetall PORT_TABLE:ETHERNET4 1) "dscp_to_tc_map" -2) "[DSCP_TO_TC_MAP_TABLE:AZURE]" +2) "AZURE" 3) "tc_to_queue_map" -4) "[TC_TO_QUEUE_MAP_TABLE:AZURE]" +4) "AZURE" 127.0.0.1:6379> hgetall TC_TO_QUEUE_MAP_TABLE:AZURE diff --git a/tests/buffer_model.py b/tests/buffer_model.py index 51f6305c33d..ae2d1ecb796 100644 --- a/tests/buffer_model.py +++ b/tests/buffer_model.py @@ -69,7 +69,7 @@ def disable_dynamic_buffer(config_db, cmd_runner): pgs = config_db.get_keys('BUFFER_PG') for key in pgs: pg = config_db.get_entry('BUFFER_PG', key) - if pg['profile'] != '[BUFFER_PROFILE|ingress_lossy_profile]': + if pg['profile'] != 'ingress_lossy_profile': config_db.delete_entry('BUFFER_PG', key) # Remove all the non-default profiles diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 365c5144b5b..172b992eb23 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -116,7 +116,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -127,9 +127,9 @@ namespace portsorch_test { std::ostringstream ossAppl, ossCfg; ossAppl << it.first << ":3-4"; - pgTable.set(ossAppl.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + pgTable.set(ossAppl.str(), { { "profile", "test_profile" } }); ossCfg << it.first << "|3-4"; - pgTableCfg.set(ossCfg.str(), { { "profile", "[BUFFER_PROFILE|test_profile]" } }); + pgTableCfg.set(ossCfg.str(), { { "profile", 
"test_profile" } }); } // Create dependencies ... @@ -238,7 +238,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -249,7 +249,7 @@ namespace portsorch_test { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + pgTable.set(oss.str(), { { "profile", "test_profile" } }); } // Populate pot table with SAI ports @@ -404,7 +404,7 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "[BUFFER_POOL_TABLE:test_pool]" }, + profileTable.set("test_profile", { { "pool", "test_pool" }, { "xon", "14832" }, { "xoff", "14832" }, { "size", "35000" }, @@ -415,7 +415,7 @@ namespace portsorch_test { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "[BUFFER_PROFILE_TABLE:test_profile]" } }); + pgTable.set(oss.str(), { { "profile", "test_profile" } }); } gBufferOrch->addExistingData(&pgTable); gBufferOrch->addExistingData(&poolTable); diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 74575c6ee31..9a4f396d061 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -158,7 +158,7 @@ def test_changeSpeed(self, dvs, testlog): self.check_new_profile_in_asic_db(dvs, expectedProfile) # Check whether buffer pg align - bufferPg = self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + bufferPg = self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -170,7 +170,7 @@ def test_changeSpeed(self, dvs, testlog): # Re-add another lossless PG self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'NULL'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove the lossless PG 6 self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') @@ -183,7 +183,7 @@ def test_changeSpeed(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -206,7 +206,7 @@ def test_changeCableLen(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove the lossless PGs 
self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -223,7 +223,7 @@ def test_changeCableLen(self, dvs, testlog): # Check the BUFFER_PROFILE_TABLE and BUFFER_PG_TABLE self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Revert the cable length self.change_cable_length(self.originalCableLen) @@ -234,7 +234,7 @@ def test_changeCableLen(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -254,14 +254,14 @@ def test_MultipleLosslessPg(self, dvs, testlog): # Add another lossless PG self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'NULL'}) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change speed and check dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change cable length and check self.change_cable_length(self.cableLenTest1) @@ -269,8 +269,8 @@ def test_MultipleLosslessPg(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.check_new_profile_in_asic_db(dvs, expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Revert the speed and cable length and check self.change_cable_length(self.originalCableLen) @@ -279,8 +279,8 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, 
self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove lossless PG 3-4 and 6 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -301,11 +301,11 @@ def test_headroomOverride(self, dvs, testlog): 'xoff': '16384', 'size': '34816', 'dynamic_th': '0', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", "test") self.app_db.wait_for_exact_match("BUFFER_PROFILE_TABLE", "test", - { "pool" : "[BUFFER_POOL_TABLE:ingress_lossless_pool]", + { "pool" : "ingress_lossless_pool", "xon" : "18432", "xoff" : "16384", "size" : "34816", @@ -319,14 +319,14 @@ def test_headroomOverride(self, dvs, testlog): self.change_cable_length(self.cableLenTest1) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.cableLenTest1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # configure lossless PG 3-4 with headroom override - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': '[BUFFER_PROFILE|test]'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:test]"}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) # configure lossless PG 6 with headroom override - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': '[BUFFER_PROFILE|test]'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:test]"}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "test"}) # update the profile self.config_db.update_entry('BUFFER_PROFILE', 'test', @@ -334,9 +334,9 @@ def test_headroomOverride(self, dvs, testlog): 'xoff': '18432', 'size': '36864', 'dynamic_th': '0', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_exact_match("BUFFER_PROFILE_TABLE", "test", - { "pool" : "[BUFFER_POOL_TABLE:ingress_lossless_pool]", + { "pool" : "ingress_lossless_pool", "xon" : "18432", "xoff" : "18432", "size" : "36864", @@ -353,7 +353,7 @@ def test_headroomOverride(self, dvs, testlog): # readd lossless PG with dynamic profile self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # remove the headroom override profile self.config_db.delete_entry('BUFFER_PROFILE', 'test') @@ -364,7 +364,7 @@ def 
test_headroomOverride(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -392,13 +392,13 @@ def test_mtuUpdate(self, dvs, testlog): self.app_db.wait_for_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.check_new_profile_in_asic_db(dvs, expectedProfileMtu) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfileMtu)}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "{}".format(expectedProfileMtu)}) dvs.runcmd("config interface mtu Ethernet0 {}".format(default_mtu)) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileNormal) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:{}]".format(expectedProfileNormal)}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "{}".format(expectedProfileNormal)}) # clear configuration self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -421,25 +421,25 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.config_db.update_entry('BUFFER_PROFILE', 'non-default-dynamic', {'dynamic_th': test_dynamic_th_1, 'headroom_type': 'dynamic', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) # configure lossless PG 3-4 on interface - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': '[BUFFER_PROFILE|non-default-dynamic]'}) + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'non-default-dynamic'}) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile_th1) self.check_new_profile_in_asic_db(dvs, expectedProfile_th1) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile_th1 + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile_th1}) # modify the profile to another dynamic_th self.config_db.update_entry('BUFFER_PROFILE', 'non-default-dynamic', {'dynamic_th': test_dynamic_th_2, 'headroom_type': 'dynamic', - 'pool': '[BUFFER_POOL|ingress_lossless_pool]'}) + 'pool': 'ingress_lossless_pool'}) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile_th1) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile_th2) self.check_new_profile_in_asic_db(dvs, expectedProfile_th2) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile_th2 + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile_th2}) # clear configuration self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') @@ -459,7 +459,7 @@ def test_sharedHeadroomPool(self, dvs, testlog): expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) 
self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) self.check_new_profile_in_asic_db(dvs, expectedProfile) profileInApplDb = self.app_db.get_entry('BUFFER_PROFILE_TABLE', expectedProfile) @@ -548,8 +548,8 @@ def test_sharedHeadroomPool(self, dvs, testlog): def test_shutdownPort(self, dvs, testlog): self.setup_db(dvs) - lossy_pg_reference_config_db = '[BUFFER_PROFILE|ingress_lossy_profile]' - lossy_pg_reference_appl_db = '[BUFFER_PROFILE_TABLE:ingress_lossy_profile]' + lossy_pg_reference_config_db = 'ingress_lossy_profile' + lossy_pg_reference_appl_db = 'ingress_lossy_profile' # Startup interface dvs.runcmd('config interface startup Ethernet0') @@ -557,7 +557,7 @@ def test_shutdownPort(self, dvs, testlog): # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) # Shutdown port and check whether all the PGs have been removed dvs.runcmd("config interface shutdown Ethernet0") @@ -578,8 +578,8 @@ def test_shutdownPort(self, dvs, testlog): dvs.runcmd("config interface startup Ethernet0") self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:1", {"profile": lossy_pg_reference_appl_db}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": "[BUFFER_PROFILE_TABLE:" + expectedProfile + "]"}) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile }) + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 32a8b396aa4..1970d5f60b7 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -63,7 +63,7 @@ def find_dot1p_profile(self): def apply_dot1p_profile_on_all_ports(self): tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) - fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_MAP_FIELD, "[" + CFG_DOT1P_TO_TC_MAP_TABLE_NAME + "|" + CFG_DOT1P_TO_TC_MAP_KEY + "]")]) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_MAP_FIELD, CFG_DOT1P_TO_TC_MAP_KEY)]) ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() for port in ports: tbl.set(port, fvs) diff --git a/tests/test_speed.py b/tests/test_speed.py index 0f3e51c5d88..7f7b8e7083b 100644 --- a/tests/test_speed.py +++ b/tests/test_speed.py @@ -70,7 +70,7 @@ def test_SpeedAndBufferSet(self, dvs, testlog): expected_pg_table = "Ethernet{}|3-4".format(i * 4) assert expected_pg_table in pg_tables - expected_fields = {"profile": "[BUFFER_PROFILE|{}]".format(expected_new_profile_name)} + expected_fields = {"profile": "{}".format(expected_new_profile_name)} 
cdb.wait_for_field_match("BUFFER_PG", expected_pg_table, expected_fields)
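
Note (illustration, not part of the patch): the stand-alone sketch below models the reference-handling contract this change introduces. The names TypeMap, qos_field_to_table and lookupReference are hypothetical stand-ins for the swss type_map, qos_to_ref_table_map/buffer_to_ref_table_map and Orch::parseReference/resolveFieldRefValue machinery; it only shows the intended behaviour after the patch: callers pass the reference table explicitly (e.g. qos_to_ref_table_map.at(field_name)), and field values are bare object names such as "AZURE" rather than ABNF references such as "[DSCP_TO_TC_MAP|AZURE]".

    // Hypothetical, self-contained sketch of the new reference resolution flow.
    // Not the real swss implementation; sai_object_id_t lookups are replaced by
    // plain strings so the example compiles on its own.
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    using TypeMap = std::map<std::string, std::map<std::string, std::string>>;

    // Field name -> table holding the referenced objects (mirrors the
    // qos_to_ref_table_map / buffer_to_ref_table_map added by this patch).
    static const std::map<std::string, std::string> qos_field_to_table = {
        {"dscp_to_tc_map", "DSCP_TO_TC_MAP"},
        {"scheduler",      "SCHEDULER"},
        {"wred_profile",   "WRED_PROFILE"},
    };

    // New-style parse: the value is the bare object name; an empty value clears
    // the reference, and the old bracketed ABNF form is rejected.
    std::string lookupReference(const TypeMap &types,
                                const std::string &field,
                                const std::string &value)
    {
        if (value.empty())
            return "";                                  // "" clears the reference
        if (value.front() == '[' || value.back() == ']')
            throw std::invalid_argument("malformed reference: " + value);

        const std::string &table = qos_field_to_table.at(field);
        return types.at(table).at(value);               // value is the object name
    }

    int main()
    {
        TypeMap types = {{"DSCP_TO_TC_MAP", {{"AZURE", "oid:0x1000"}}}};

        // New format: plain object name.
        std::cout << lookupReference(types, "dscp_to_tc_map", "AZURE") << "\n";

        // Old ABNF format is now treated as malformed.
        try {
            lookupReference(types, "dscp_to_tc_map", "[DSCP_TO_TC_MAP|AZURE]");
        } catch (const std::invalid_argument &e) {
            std::cout << e.what() << "\n";
        }
        return 0;
    }

In the same spirit, configuration written by operators or tests now uses plain names, e.g. "scheduler": "scheduler.0" and "pool": "ingress_lossless_pool" instead of "scheduler": "[SCHEDULER|scheduler.0]" and "pool": "[BUFFER_POOL|ingress_lossless_pool]", as shown in the doc/Configuration.md and test changes above.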