From 3ec64d0f4b2b1b1f4291d0114406b4dc483efda2 Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Tue, 28 Dec 2021 00:56:51 +0000 Subject: [PATCH 1/4] OA changes to support Ordered ECMP and DVS test for same. Signed-off-by: Abhishek Dosi --- orchagent/routeorch.cpp | 30 ++- orchagent/routeorch.h | 8 +- orchagent/switchorch.cpp | 55 +++++- orchagent/switchorch.h | 6 + orchagent/vnetorch.cpp | 41 +++- tests/test_nhg.py | 86 +++++++-- tests/test_vnet.py | 396 ++++++++++++++++++++++----------------- 7 files changed, 420 insertions(+), 202 deletions(-) diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 05606f2fb6..e3c27b9818 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -376,6 +376,13 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nhopgroup->second.nhopgroup_members[nexthop].seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + status = sai_next_hop_group_api->create_next_hop_group_member(&nexthop_id, gSwitchId, (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -393,7 +400,7 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& ++count; gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - nhopgroup->second.nhopgroup_members[nexthop] = nexthop_id; + nhopgroup->second.nhopgroup_members[nexthop].next_hop_id = nexthop_id; } if (!m_fgNhgOrch->validNextHopInNextHopGroup(nexthop)) @@ -421,7 +428,7 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t continue; } - nexthop_id = nhopgroup->second.nhopgroup_members[nexthop]; + nexthop_id = nhopgroup->second.nhopgroup_members[nexthop].next_hop_id; status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); if (status != SAI_STATUS_SUCCESS) @@ -1241,7 +1248,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = m_switchOrch->checkOrderedEcmpEnable() ? 
SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -1295,6 +1302,13 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) nhgm_attrs.push_back(nhgm_attr); } + if (m_switchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = ((uint32_t)i) + 1; // To make non-zero sequence id + nhgm_attrs.push_back(nhgm_attr); + } + gNextHopGroupMemberBulker.create_entry(&nhgm_ids[i], (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()); @@ -1319,7 +1333,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end()) { auto it = nhopgroup_shared_set[nhid].begin(); - next_hop_group_entry.nhopgroup_members[*it] = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[*it].seq_id = (uint32_t)i + 1; nhopgroup_shared_set[nhid].erase(it); if (nhopgroup_shared_set[nhid].empty()) { @@ -1328,7 +1343,8 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) } else { - next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].next_hop_id = nhgm_id; + next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].seq_id = ((uint32_t)i) + 1; } } @@ -1373,12 +1389,12 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) if (m_neighOrch->isNextHopFlagSet(nhop->first, NHFLAGS_IFDOWN)) { SWSS_LOG_WARN("NHFLAGS_IFDOWN set for next hop group member %s with next_hop_id %" PRIx64, - nhop->first.to_string().c_str(), nhop->second); + nhop->first.to_string().c_str(), nhop->second.next_hop_id); nhop = nhgm.erase(nhop); continue; } - next_hop_ids.push_back(nhop->second); + next_hop_ids.push_back(nhop->second.next_hop_id); nhop = nhgm.erase(nhop); } diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 22756ad176..2c8826ecf7 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -24,7 +24,13 @@ #define LOOPBACK_PREFIX "Loopback" -typedef std::map NextHopGroupMembers; +struct NextHopGroupMemberEntry +{ + sai_object_id_t next_hop_id; // next hop sai oid + uint32_t seq_id; // Sequence Id of nexthop in the group +}; + +typedef std::map NextHopGroupMembers; struct NhgBase; diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 325bf2a3de..1f6d8e0093 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -1,4 +1,5 @@ #include +#include #include #include "switchorch.h" @@ -44,6 +45,9 @@ const map packet_action_map = {"trap", SAI_PACKET_ACTION_TRAP} }; + +const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; + SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, TableConnector switchTable): Orch(connectors), m_switchTable(switchTable.first, switchTable.second), @@ -224,7 +228,51 @@ void SwitchOrch::doCfgSensorsTableTask(Consumer &consumer) } } +void SwitchOrch::setSwitchNonSaiAttributes(swss::FieldValueTuple &val) +{ + auto attribute = fvField(val); + auto value = fvValue(val); + if (attribute == "ordered_ecmp") + { + vector fvVector; + if (value == "true") + { + const auto* meta = sai_metadata_get_attr_metadata(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, SAI_NEXT_HOP_GROUP_ATTR_TYPE); + if (meta && meta->isenum) + { + vector values_list(meta->enummetadata->valuescount); + sai_s32_list_t values; + values.count = 
static_cast(values_list.size()); + values.list = values_list.data(); + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + SAI_NEXT_HOP_GROUP_ATTR_TYPE, + &values); + if (status == SAI_STATUS_SUCCESS) + { + for (size_t i = 0; i < values.count; i++) + { + if (values.list[i] == SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP) + { + m_orderedEcmpEnable = true; + fvVector.emplace_back(SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE, "true"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is configured"); + return; + } + } + } + } + } + m_orderedEcmpEnable = false; + fvVector.emplace_back(SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE, "false"); + set_switch_capability(fvVector); + SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is not configured"); + return; + } +} sai_status_t SwitchOrch::setSwitchTunnelVxlanParams(swss::FieldValueTuple &val) { auto attribute = fvField(val); @@ -296,7 +344,12 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) { auto attribute = fvField(i); - if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) + if (switch_non_sai_attribute_set.find(attribute) != switch_non_sai_attribute_set.end()) + { + setSwitchNonSaiAttributes(i); + continue; + } + else if (switch_attribute_map.find(attribute) == switch_attribute_map.end()) { // Check additionally 'switch_tunnel_attribute_map' for Switch Tunnel if (switch_tunnel_attribute_map.find(attribute) == switch_tunnel_attribute_map.end()) diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 8c3789f523..49c87ff0d1 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -10,6 +10,7 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" +#define SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" struct WarmRestartCheck { @@ -37,6 +38,8 @@ class SwitchOrch : public Orch // Initialize the ACL groups bind to Switch void initAclGroupsBindToSwitch(); + bool checkOrderedEcmpEnable() { return m_orderedEcmpEnable; } + private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); @@ -45,6 +48,8 @@ class SwitchOrch : public Orch void initSensorsTable(); void querySwitchTpidCapability(); sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); + void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); + // Create the default ACL group for the given stage, bind point is // SAI_ACL_BIND_POINT_TYPE_SWITCH and group type is @@ -74,6 +79,7 @@ class SwitchOrch : public Orch bool m_sensorsMaxTempSupported = true; bool m_sensorsAvgTempSupported = true; bool m_vxlanSportUserModeEnabled = false; + bool m_orderedEcmpEnable = false; // Information contained in the request from // external program for orchagent pre-shutdown state check diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index dc5838d8a5..30fff2859e 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -40,7 +40,7 @@ extern CrmOrch *gCrmOrch; extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; - +extern SwitchOrch *gSwitchOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -675,9 +675,12 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector next_hop_ids; set next_hop_set = nexthops.getNextHops(); std::map nhopgroup_members_set; + std::map nh_seq_id_in_nhgrp; + uint32_t seq_id = 0; for (auto it : next_hop_set) { + nh_seq_id_in_nhgrp[it] = 
++seq_id; if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) { continue; @@ -691,7 +694,7 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n vector nhg_attrs; nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + nhg_attr.value.s32 = gSwitchOrch->checkOrderedEcmpEnable() ? SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP : SAI_NEXT_HOP_GROUP_TYPE_ECMP; nhg_attrs.push_back(nhg_attr); sai_object_id_t next_hop_group_id; @@ -728,6 +731,13 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n nhgm_attr.value.oid = nhid; nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id_in_nhgrp[nhopgroup_members_set.find(nhid)->second]; + nhgm_attrs.push_back(nhgm_attr); + } + sai_object_id_t next_hop_group_member_id; status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, @@ -860,7 +870,10 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopGroupInfo next_hop_group_entry; next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); next_hop_group_entry.ref_count = 0; - next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; } else @@ -1648,7 +1661,20 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) NextHopGroupKey nexthops = nhg_info_pair.first; NextHopGroupInfo& nhg_info = nhg_info_pair.second; - if (!(nexthops.contains(endpoint))) + std::set next_hop_set = nexthops.getNextHops(); + uint32_t seq_id = 0; + uint32_t nh_seq_id = 0; + for (auto nh: next_hop_set) + { + seq_id++; + if (nh == endpoint) + { + nh_seq_id = seq_id; + break; + } + } + + if (!nh_seq_id) { continue; } @@ -1670,6 +1696,13 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) nhgm_attr.value.oid = vrf_obj->getTunnelNextHop(endpoint); nhgm_attrs.push_back(nhgm_attr); + if (gSwitchOrch->checkOrderedEcmpEnable()) + { + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID; + nhgm_attr.value.u32 = nh_seq_id; + nhgm_attrs.push_back(nhgm_attr); + } + sai_status_t status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, gSwitchId, (uint32_t)nhgm_attrs.size(), diff --git a/tests/test_nhg.py b/tests/test_nhg.py index c8d75fb5e7..390a8e0f51 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -154,11 +154,13 @@ def init_test(self, dvs, num_intfs): self.app_db = self.dvs.get_app_db() self.asic_db = self.dvs.get_asic_db() self.config_db = self.dvs.get_config_db() + self.state_db = self.dvs.get_state_db() self.nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_NEXTHOP_GROUP_TABLE_NAME) self.rt_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_ROUTE_TABLE_NAME) self.lr_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_LABEL_ROUTE_TABLE_NAME) self.cbf_nhg_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_CLASS_BASED_NEXT_HOP_GROUP_TABLE_NAME) self.fc_to_nhg_ps = 
swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_FC_TO_NHG_INDEX_MAP_TABLE_NAME) + self.switch_ps = swsscommon.ProducerStateTable(self.app_db.db_connection, swsscommon.APP_SWITCH_TABLE_NAME) # Set switch FC capability to 63 self.dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES', '63') @@ -182,6 +184,16 @@ def route_exists(self, rt_prefix): def nhg_map_exists(self, nhg_map_index): return self.get_nhg_map_id(nhg_map_index) is not None + def enable_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'true')]) + self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + def disble_ordered_ecmp(self): + switch_fvs = swsscommon.FieldValuePairs([('ordered_ecmp', 'false')]) + self.switch_ps.set('switch', switch_fvs) + self.state_db.wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + class TestNhgExhaustBase(TestNextHopGroupBase): MAX_ECMP_COUNT = 512 MAX_PORT_COUNT = 10 @@ -887,8 +899,13 @@ def test_cbf_nhg_exhaust(self, dvs, testlog): class TestNextHopGroup(TestNextHopGroupBase): - def test_route_nhg(self, dvs, dvs_route, testlog): + @pytest.mark.parametrize('ordered_ecmp', ['false', 'true']) + def test_route_nhg(self, ordered_ecmp, dvs, dvs_route, testlog): self.init_test(dvs, 3) + nhip_seqid_map = {"10.0.0.1" : "1", "10.0.0.3" : "2" , "10.0.0.5" : "3" } + + if ordered_ecmp == 'true': + self.enable_ordered_ecmp() rtprefix = "2.2.2.0/24" @@ -911,6 +928,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert bool(fvs) + if ordered_ecmp == 'true': + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP" + else: + assert fvs["SAI_NEXT_HOP_GROUP_ATTR_TYPE"] == "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 @@ -923,6 +945,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -930,8 +959,9 @@ def test_route_nhg(self, dvs, dvs_route, testlog): dvs_route.check_asicdb_deleted_route_entries([rtprefix]) # Negative test with nexthops with incomplete weight info - fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.3,10.0.0.5"), - ("ifname", "Ethernet0,Ethernet4,Ethernet8"), + # To validate Order ECMP change the nexthop order + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.5,10.0.0.1,10.0.0.3"), + ("ifname", "Ethernet8,Ethernet0,Ethernet4"), ("weight", "10,30")]) self.rt_ps.set(rtprefix, fvs) @@ -939,25 +969,33 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = 
self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid # verify weight attributes not in asic db assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT") is None + + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) @@ -974,20 +1012,20 @@ def test_route_nhg(self, dvs, dvs_route, testlog): rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) # assert the route points to next hop group - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", rtkeys[0]) + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkeys[0]) nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nhgid) + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) assert bool(fvs) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 3 for k in keys: - fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid @@ -995,6 +1033,13 @@ def test_route_nhg(self, dvs, dvs_route, testlog): nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] weight = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT"] + if ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) nhip = fvs["SAI_NEXT_HOP_ATTR_IP"].split('.') expected_weight = int(nhip[3]) * 10 @@ -1011,11 +1056,11 @@ def test_route_nhg(self, dvs, dvs_route, testlog): # wait for route to be programmed time.sleep(1) - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + keys = self.asic_db.get_keys(self.ASIC_NHG_STR) assert len(keys) == 2 - keys = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) assert len(keys) == 6 @@ -1035,7 +1080,8 @@ def test_route_nhg(self, dvs, dvs_route, testlog): assert len(keys) == 2 - i # bring links up one-by-one - for i in [0, 1, 2]: + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): self.flap_intf(i, 'up') keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) @@ -1045,13 +1091,23 @@ def test_route_nhg(self, dvs, dvs_route, testlog): for k in keys: fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhgid - + if 
ordered_ecmp == "true": + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + assert nhip_seqid_map[nh_fvs["SAI_NEXT_HOP_ATTR_IP"]] == fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) # Wait for route 2.2.2.0/24 to be removed dvs_route.check_asicdb_deleted_route_entries([rtprefix]) + # Cleanup by disabling to get default behaviour + if ordered_ecmp == 'true': + self.disble_ordered_ecmp() + def test_label_route_nhg(self, dvs, testlog): self.init_test(dvs, 3) diff --git a/tests/test_vnet.py b/tests/test_vnet.py index a41f9ee39f..6790fbde19 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -503,16 +503,17 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" - tunnel_map_ids = set() - tunnel_map_entry_ids = set() - tunnel_ids = set() - tunnel_term_ids = set() - tunnel_map_map = {} - tunnel = {} - vnet_vr_ids = set() - vr_map = {} - nh_ids = {} - nhg_ids = {} + def __init__(self): + self.tunnel_map_ids = set() + self.tunnel_map_entry_ids = set() + self.tunnel_ids = set() + self.tunnel_term_ids = set() + self.tunnel_map_map = {} + self.tunnel = {} + self.vnet_vr_ids = set() + self.vr_map = {} + self.nh_ids = {} + self.nhg_ids = {} def fetch_exist_entries(self, dvs): self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) @@ -798,7 +799,7 @@ def serialize_endpoint_group(self, endpoints): endpoints.sort() return ",".join(endpoints) - def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attrs): + def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) @@ -817,11 +818,17 @@ def check_next_hop_group_member(self, dvs, nhg, expected_endpoint, expected_attr endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] endpoints.append(endpoint) assert endpoint in expected_attrs + if ordered_ecmp == "true": + assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str - def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg=""): + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) @@ -839,6 +846,8 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) if mac and mac[idx]: expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + if ordered_ecmp == "true" and nh_seq_id: + expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) 
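# NOTE (illustrative sketch, not part of this patch): the per-member verification used by
# test_nhg.py and test_vnet.py follows one pattern - push 'ordered_ecmp' into the APP_DB
# SWITCH_TABLE, wait for the ORDERED_ECMP_CAPABLE flag that SwitchOrch publishes to
# STATE_DB, then assert that every ASIC_DB group member carries the sequence id expected
# for its next-hop IP. The helper name, the nhgid argument and the expected_seq mapping
# below are assumptions made for illustration only; the DB accessors and field names are
# the ones already used in these tests.
from swsscommon import swsscommon

def verify_ordered_ecmp_members(dvs, nhgid, expected_seq):
    # expected_seq: next-hop IP -> expected SAI sequence id (as a string)
    asic_db = dvs.get_asic_db()

    # Enable ordered ECMP through APP_DB and wait for the STATE_DB capability flag.
    switch_ps = swsscommon.ProducerStateTable(dvs.get_app_db().db_connection,
                                              swsscommon.APP_SWITCH_TABLE_NAME)
    switch_ps.set('switch', swsscommon.FieldValuePairs([('ordered_ecmp', 'true')]))
    dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch",
                                            {"ORDERED_ECMP_CAPABLE": "true"})

    # Each member of the group must carry the sequence id matching its next-hop IP.
    for key in asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER"):
        member = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", key)
        if member["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] != nhgid:
            continue
        nh = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP",
                               member["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"])
        assert member["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_seq[nh["SAI_NEXT_HOP_ATTR_IP"]]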
expected_attrs[endpoint] = expected_attr if nhg: @@ -853,12 +862,12 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r # Check routes in ingress VRF expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", } check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) # Check nexthop group member - self.check_next_hop_group_member(dvs, new_nhg, endpoints, expected_attrs) + self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) if route_ids: new_route = route_ids @@ -901,6 +910,32 @@ class TestVnetOrch(object): def get_vnet_obj(self): return VnetVxlanVrfTunnel() + @pytest.fixture(params=["true", "false"]) + def ordered_ecmp(self, dvs, request): + + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'true') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + + yield request.param + + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'false') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + ''' Test 1 - Create Vlan Interface, Tunnel and Vnet ''' @@ -1335,202 +1370,212 @@ def test_vnet_vxlan_multi_map(self, dvs, testlog): ''' Test 7 - Test for vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_7(self, dvs, testlog): + def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_7' + tunnel_name = 'tunnel_7' + ordered_ecmp + vnet_name = 'Vnet7' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - create_vnet_entry(dvs, 'Vnet7', tunnel_name, '10007', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10007', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet7', '10007') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10007') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.3,7.0.0.2,7.0.0.1') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + 
set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.4,7.0.0.3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) assert nhg2_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 in vnet_obj.nhgs # Remove the other tunnel route - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet7') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet7') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group ''' - def test_vnet_orch_8(self, dvs, testlog): + def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_8' + tunnel_name = 'tunnel_8' + ordered_ecmp + vnet_name = 'Vnet8' + ordered_ecmp + vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet8', tunnel_name, '10008', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10008', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet8', '10008') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10008') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) 
- check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + create_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::1,fd:8:1::3,fd:8:1::2') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + set_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::2,fd:8:1::3,fd:8:1::1,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "fd:8:20::32/128", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) assert nhg2_1 == nhg1_2 # Create another tunnel route with ipv4 prefix to the same set of endpoints - create_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) - check_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + create_vnet_routes(dvs, "8.0.0.0/24", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) assert nhg3_1 == nhg1_2 # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128") + delete_vnet_routes(dvs, "fd:8:10::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:10::32/128") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) assert 
nhg1_2 in vnet_obj.nhgs # Remove tunnel route 2 - delete_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:20::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128") + delete_vnet_routes(dvs, "fd:8:20::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:20::32/128") # Remove tunnel route 3 - delete_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["8.0.0.0/24"]) - check_remove_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24") + delete_vnet_routes(dvs, "8.0.0.0/24", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, vnet_name, "8.0.0.0/24") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg2_1 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet8') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet8') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_9(self, dvs, testlog): + def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_9' + tunnel_name = 'tunnel_9' + ordered_ecmp + vnet_name = 'Vnet9' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, 'Vnet9', tunnel_name, '10009', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10009', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet9') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet9', '10009') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10009') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '9.1.0.1', 'Up') update_bfd_session_state(dvs, '9.1.0.2', 'Up') update_bfd_session_state(dvs, '9.1.0.3', 'Up') + update_bfd_session_state(dvs, '9.1.0.1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) # Remove endpoint from group if it goes down update_bfd_session_state(dvs, '9.1.0.2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", 
['9.0.0.1', '9.0.0.3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1']) # Update BFD session state and verify route change update_bfd_session_state(dvs, '9.1.0.5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) # Update BFD state and check route nexthop update_bfd_session_state(dvs, '9.1.0.3', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1']) # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') update_bfd_session_state(dvs, '9.1.0.4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1539,8 +1584,8 @@ def test_vnet_orch_9(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, '9.1.0.2', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, 
route_ids=route1, nhg=nhg1_2, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) # Set all endpoint to down state update_bfd_session_state(dvs, '9.1.0.1', 'Down') @@ -1550,15 +1595,15 @@ def test_vnet_orch_9(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.5']) - check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1569,9 +1614,9 @@ def test_vnet_orch_9(self, dvs, testlog): check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1580,75 +1625,76 @@ def test_vnet_orch_9(self, dvs, testlog): # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) - delete_vnet_entry(dvs, 'Vnet9') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet9') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_10(self, dvs, testlog): + def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_10' + tunnel_name = 'tunnel_10' + ordered_ecmp + vnet_name = 'Vnet10' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - create_vnet_entry(dvs, 'Vnet10', tunnel_name, '10010', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet10', '10010') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + 
create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) # Remove endpoint from group if it goes down update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1']) + create_vnet_routes(dvs, "fd:10:20::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1']) # Update BFD session state and verify route change update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) # Update BFD state and check route nexthop update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, 
route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) # Set the route to a new group - set_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) assert nhg1_1 not in vnet_obj.nhgs @@ -1656,8 +1702,9 @@ def test_vnet_orch_10(self, dvs, testlog): # Set BFD session state for a down endpoint to up update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) # Set all endpoint to down state update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') @@ -1667,15 +1714,15 @@ def test_vnet_orch_10(self, dvs, testlog): time.sleep(2) # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::5']) - check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) # Remove tunnel route2 - delete_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:20::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128") + delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) + 
check_remove_state_db_routes(dvs, vnet_name, "fd:10:20::1/128") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1690,9 +1737,9 @@ def test_vnet_orch_10(self, dvs, testlog): check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128") + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) @@ -1701,76 +1748,77 @@ def test_vnet_orch_10(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) assert nhg1_2 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet10') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet10') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) ''' Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor ''' - def test_vnet_orch_11(self, dvs, testlog): + def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_11' + tunnel_name = 'tunnel_11' + ordered_ecmp + vnet_name = 'Vnet11' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') - create_vnet_entry(dvs, 'Vnet11', tunnel_name, '100011', "") + create_vnet_entry(dvs, vnet_name, tunnel_name, '100011', "") - vnet_obj.check_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet11', '100011') + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '100011') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.1', ep_monitor='11.1.0.1') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.1', ep_monitor='11.1.0.1') # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) # Route should be properly configured when bfd session state goes up update_bfd_session_state(dvs, '11.1.0.1', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.1', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.1']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['11.0.0.1']) # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11', '11.0.0.1,11.0.0.2', ep_monitor='11.1.0.1,11.1.0.2') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1']) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '11.0.0.2,11.0.0.1', ep_monitor='11.1.0.2,11.1.0.1') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1'], tunnel_name, 
ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1']) # Create a third tunnel route with another endpoint vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + create_vnet_routes(dvs, "100.100.3.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') # Update BFD session state and verify route change update_bfd_session_state(dvs, '11.1.0.2', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) update_bfd_session_state(dvs, '11.1.0.1', 'Down') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['2']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) # Set the route1 to a new endpoint vnet_obj.fetch_exist_entries(dvs) - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') - vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) - check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32") + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1781,20 +1829,20 @@ def test_vnet_orch_11(self, dvs, testlog): check_bfd_session(dvs, ['11.1.0.2']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") # Remove tunnel route 3 
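# NOTE (illustrative sketch, not part of this patch): in the BFD-monitored cases above the
# sequence id stays anchored to an endpoint's slot in the full group, so when 11.1.0.1 goes
# down the surviving member 11.0.0.2 keeps sequence id '2' instead of collapsing to '1',
# and updateVnetTunnel re-adds a recovered member with that same slot. A hypothetical
# helper for deriving the nh_seq_id arguments, reusing the expected_sequence_ids() sketch
# above:
def surviving_seq_ids(all_endpoints, up_endpoints):
    # Full-group sequence ids filtered down to the endpoints whose BFD sessions are Up;
    # members removed on BFD down keep their original slot when they are re-added.
    seq = expected_sequence_ids(all_endpoints)
    return [seq[ep] for ep in sorted(up_endpoints, key=ipaddress.ip_address)]

# surviving_seq_ids(['11.0.0.1', '11.0.0.2'], ['11.0.0.2'])  ->  ['2']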
- delete_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.3.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32") + delete_vnet_routes(dvs, "100.100.3.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.3.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.3.1/32") # Confirm the BFD sessions are removed check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) - delete_vnet_entry(dvs, 'Vnet11') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet11') + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) From 9322837ef57a6e938997f9acee9908532da06c46 Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Thu, 6 Jan 2022 23:20:27 +0000 Subject: [PATCH 2/4] Address Review comment and LGTM Signed-off-by: Abhishek Dosi --- orchagent/switchorch.cpp | 4 ++-- orchagent/switchorch.h | 2 +- tests/test_nhg.py | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 1f6d8e0093..48ecd1fd35 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -257,7 +257,7 @@ void SwitchOrch::setSwitchNonSaiAttributes(swss::FieldValueTuple &val) if (values.list[i] == SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP) { m_orderedEcmpEnable = true; - fvVector.emplace_back(SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE, "true"); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "true"); set_switch_capability(fvVector); SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is configured"); return; @@ -267,7 +267,7 @@ void SwitchOrch::setSwitchNonSaiAttributes(swss::FieldValueTuple &val) } } m_orderedEcmpEnable = false; - fvVector.emplace_back(SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE, "false"); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE, "false"); set_switch_capability(fvVector); SWSS_LOG_NOTICE("Ordered ECMP/Nexthop-Group is not configured"); return; diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 49c87ff0d1..5b09a67640 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -10,7 +10,7 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" -#define SWITCH_CAPABILITY_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" struct WarmRestartCheck { diff --git a/tests/test_nhg.py b/tests/test_nhg.py index 390a8e0f51..83a744a8b4 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -3,7 +3,6 @@ import sys import time import json -import pytest import ipaddress from swsscommon import swsscommon From e5a958c118405d54bc97ba2ebb73263f46db65fc Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Tue, 11 Jan 2022 16:43:48 +0000 Subject: [PATCH 3/4] Fix Signed-off-by: Abhishek Dosi --- tests/test_nhg.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_nhg.py b/tests/test_nhg.py index 83a744a8b4..390a8e0f51 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -3,6 +3,7 @@ import sys import time import json +import pytest import ipaddress from swsscommon import swsscommon From 8931ec4205b4e83299a980c623d898200db17c22 Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Tue, 11 Jan 2022 23:57:54 +0000 Subject: [PATCH 4/4] Fix Merge Conflict Signed-off-by: Abhishek Dosi --- tests/test_vnet.py | 129 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 128 insertions(+), 1 deletion(-) diff --git 
a/tests/test_vnet.py b/tests/test_vnet.py index edf094ca16..41217de92e 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -1819,7 +1819,6 @@ def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): # The default Vnet setting does not advertise prefix check_remove_routes_advertisement(dvs, "fd:10:20::1/128") - # Remove tunnel route2 delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) @@ -1963,6 +1962,134 @@ def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): vnet_obj.check_del_vnet_entry(dvs, vnet_name) + ''' + Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement + ''' + def test_vnet_orch_12(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_12' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) + + vnet_obj.check_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '12.1.0.1', 'Up') + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + update_bfd_session_state(dvs, '12.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '12.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', 
['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + update_bfd_session_state(dvs, '12.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32") + + # Set all endpoint to down state + update_bfd_session_state(dvs, '12.1.0.1', 'Down') + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + update_bfd_session_state(dvs, '12.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.2.1/32") + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['12.1.0.5']) + check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5']) + + delete_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying