Skip to content

Commit 9f22ba7

Browse files
authored
Handle IPv6 and ECMP routes to be programmed to ASIC (#1711)
* In the case of a standby mux, associated routes may arrive at orch with an ifname of tun0. Handle this case when the nexthop is non-zero.
* For ECMP, multiple nexthop IPs can map to the same SAI nexthop id (the tunnel NH). The existing data structure is unable to handle such a case, so a secondary map was added for when nexthops are shared.
1 parent 1b916c3 commit 9f22ba7

File tree

5 files changed

+110
-26
lines changed

5 files changed

+110
-26
lines changed

cfgmgr/tunnelmgr.cpp

+30-8
Original file line numberDiff line numberDiff line change
@@ -64,21 +64,43 @@ static int cmdIpTunnelRouteAdd(const std::string& pfx, std::string & res)
6464
// ip route add/replace {{ip prefix}} dev {{tunnel intf}}
6565
// Replace route if route already exists
6666
ostringstream cmd;
67-
cmd << IP_CMD " route replace "
68-
<< shellquote(pfx)
69-
<< " dev "
70-
<< TUNIF;
67+
if (IpPrefix(pfx).isV4())
68+
{
69+
cmd << IP_CMD " route replace "
70+
<< shellquote(pfx)
71+
<< " dev "
72+
<< TUNIF;
73+
}
74+
else
75+
{
76+
cmd << IP_CMD " -6 route replace "
77+
<< shellquote(pfx)
78+
<< " dev "
79+
<< TUNIF;
80+
}
81+
7182
return swss::exec(cmd.str(), res);
7283
}
7384

7485
static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res)
7586
{
7687
// ip route del {{ip prefix}} dev {{tunnel intf}}
7788
ostringstream cmd;
78-
cmd << IP_CMD " route del "
79-
<< shellquote(pfx)
80-
<< " dev "
81-
<< TUNIF;
89+
if (IpPrefix(pfx).isV4())
90+
{
91+
cmd << IP_CMD " route del "
92+
<< shellquote(pfx)
93+
<< " dev "
94+
<< TUNIF;
95+
}
96+
else
97+
{
98+
cmd << IP_CMD " -6 route del "
99+
<< shellquote(pfx)
100+
<< " dev "
101+
<< TUNIF;
102+
}
103+
82104
return swss::exec(cmd.str(), res);
83105
}
84106

orchagent/muxorch.cpp

-12
Original file line numberDiff line numberDiff line change
@@ -1278,12 +1278,6 @@ void MuxCableOrch::updateMuxState(string portName, string muxState)
12781278

12791279
void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
12801280
{
1281-
if (!nhKey.ip_address.isV4())
1282-
{
1283-
SWSS_LOG_INFO("IPv6 tunnel route add '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
1284-
return;
1285-
}
1286-
12871281
vector<FieldValueTuple> data;
12881282
string key, alias = nhKey.alias;
12891283

@@ -1299,12 +1293,6 @@ void MuxCableOrch::addTunnelRoute(const NextHopKey &nhKey)
12991293

13001294
void MuxCableOrch::removeTunnelRoute(const NextHopKey &nhKey)
13011295
{
1302-
if (!nhKey.ip_address.isV4())
1303-
{
1304-
SWSS_LOG_INFO("IPv6 tunnel route remove '%s' - (Not Implemented)", nhKey.ip_address.to_string().c_str());
1305-
return;
1306-
}
1307-
13081296
string key, alias = nhKey.alias;
13091297

13101298
IpPrefix pfx = nhKey.ip_address.to_string();

orchagent/neighorch.cpp

+4-1
Original file line numberDiff line numberDiff line change
@@ -313,7 +313,10 @@ bool NeighOrch::isNextHopFlagSet(const NextHopKey &nexthop, const uint32_t nh_fl
313313

314314
auto nhop = m_syncdNextHops.find(nexthop);
315315

316-
assert(nhop != m_syncdNextHops.end());
316+
if (nhop == m_syncdNextHops.end())
317+
{
318+
return false;
319+
}
317320

318321
if (nhop->second.nh_flags & nh_flag)
319322
{

orchagent/routeorch.cpp

+37-5
Original file line numberDiff line numberDiff line change
@@ -574,7 +574,7 @@ void RouteOrch::doTask(Consumer& consumer)
574574
* way is to create loopback interface and then create
575575
* route pointing to it, so that we can traps packets to
576576
* CPU */
577-
if (alias == "eth0" || alias == "docker0" || alias == "tun0" ||
577+
if (alias == "eth0" || alias == "docker0" ||
578578
alias == "lo" || !alias.compare(0, strlen(LOOPBACK_PREFIX), LOOPBACK_PREFIX))
579579
{
580580
excp_intfs_flag = true;
@@ -599,10 +599,18 @@ void RouteOrch::doTask(Consumer& consumer)
599599

600600
if (overlay_nh == false)
601601
{
602+
if (alsv[0] == "tun0" && !(IpAddress(ipv[0]).isZero()))
603+
{
604+
alsv[0] = gIntfsOrch->getRouterIntfsAlias(ipv[0]);
605+
}
602606
nhg_str = ipv[0] + NH_DELIMITER + alsv[0];
603607

604608
for (uint32_t i = 1; i < ipv.size(); i++)
605609
{
610+
if (alsv[i] == "tun0" && !(IpAddress(ipv[i]).isZero()))
611+
{
612+
alsv[i] = gIntfsOrch->getRouterIntfsAlias(ipv[i]);
613+
}
606614
nhg_str += NHG_DELIMITER + ipv[i] + NH_DELIMITER + alsv[i];
607615
}
608616

@@ -628,6 +636,11 @@ void RouteOrch::doTask(Consumer& consumer)
628636
/* add addBlackholeRoute or addRoute support empty nhg */
629637
it = consumer.m_toSync.erase(it);
630638
}
639+
/* skip direct routes to tun0 */
640+
else if (alsv[0] == "tun0")
641+
{
642+
it = consumer.m_toSync.erase(it);
643+
}
631644
/* directly connected route to VRF interface which come from kernel */
632645
else if (!alsv[0].compare(0, strlen(VRF_PREFIX), VRF_PREFIX))
633646
{
@@ -1009,6 +1022,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
10091022
vector<sai_object_id_t> next_hop_ids;
10101023
set<NextHopKey> next_hop_set = nexthops.getNextHops();
10111024
std::map<sai_object_id_t, NextHopKey> nhopgroup_members_set;
1025+
std::map<sai_object_id_t, set<NextHopKey>> nhopgroup_shared_set;
10121026

10131027
/* Assert each IP address exists in m_syncdNextHops table,
10141028
* and add the corresponding next_hop_id to next_hop_ids. */
@@ -1029,7 +1043,14 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
10291043

10301044
sai_object_id_t next_hop_id = m_neighOrch->getNextHopId(it);
10311045
next_hop_ids.push_back(next_hop_id);
1032-
nhopgroup_members_set[next_hop_id] = it;
1046+
if (nhopgroup_members_set.find(next_hop_id) == nhopgroup_members_set.end())
1047+
{
1048+
nhopgroup_members_set[next_hop_id] = it;
1049+
}
1050+
else
1051+
{
1052+
nhopgroup_shared_set[next_hop_id].insert(it);
1053+
}
10331054
}
10341055

10351056
sai_attribute_t nhg_attr;
@@ -1103,8 +1124,20 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
11031124
gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER);
11041125

11051126
// Save the membership into next hop structure
1106-
next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] =
1107-
nhgm_id;
1127+
if (nhopgroup_shared_set.find(nhid) != nhopgroup_shared_set.end())
1128+
{
1129+
auto it = nhopgroup_shared_set[nhid].begin();
1130+
next_hop_group_entry.nhopgroup_members[*it] = nhgm_id;
1131+
nhopgroup_shared_set[nhid].erase(it);
1132+
if (nhopgroup_shared_set[nhid].empty())
1133+
{
1134+
nhopgroup_shared_set.erase(nhid);
1135+
}
1136+
}
1137+
else
1138+
{
1139+
next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second] = nhgm_id;
1140+
}
11081141
}
11091142

11101143
/* Increment the ref_count for the next hops used by the next hop group. */
@@ -1118,7 +1151,6 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops)
11181151
next_hop_group_entry.ref_count = 0;
11191152
m_syncdNextHopGroups[nexthops] = next_hop_group_entry;
11201153

1121-
11221154
return true;
11231155
}
11241156

tests/test_mux.py

+39
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ def create_vlan_interface(self, confdb, asicdb, dvs):
5959
fvs = {"NULL": "NULL"}
6060
confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs)
6161
confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs)
62+
confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs)
6263

6364
dvs.runcmd("config interface startup Ethernet0")
6465
dvs.runcmd("config interface startup Ethernet4")
@@ -334,6 +335,44 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route):
334335
self.set_mux_state(appdb, "Ethernet4", "active")
335336
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
336337

338+
ps._del(rtprefix)
339+
340+
# Test IPv6 ECMP routes and start with standby config
341+
self.set_mux_state(appdb, "Ethernet0", "standby")
342+
self.set_mux_state(appdb, "Ethernet4", "standby")
343+
344+
rtprefix = "2020::/64"
345+
346+
dvs_route.check_asicdb_deleted_route_entries([rtprefix])
347+
348+
ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE")
349+
350+
fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")])
351+
352+
ps.set(rtprefix, fvs)
353+
354+
# Check if route was propagated to ASIC DB
355+
rtkeys = dvs_route.check_asicdb_route_entries([rtprefix])
356+
357+
# Check for nexthop group and validate nexthop group member in asic db
358+
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
359+
360+
# Step: 1 - Change one NH to active and verify ecmp route
361+
self.set_mux_state(appdb, "Ethernet0", "active")
362+
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
363+
364+
# Step: 2 - Change the other NH to active and verify ecmp route
365+
self.set_mux_state(appdb, "Ethernet4", "active")
366+
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0])
367+
368+
# Step: 3 - Change one NH to back to standby and verify ecmp route
369+
self.set_mux_state(appdb, "Ethernet0", "standby")
370+
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1)
371+
372+
# Step: 4 - Change the other NH to standby and verify ecmp route
373+
self.set_mux_state(appdb, "Ethernet4", "standby")
374+
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2)
375+
337376

338377
def get_expected_sai_qualifiers(self, portlist, dvs_acl):
339378
expected_sai_qualifiers = {

0 commit comments

Comments
 (0)