Commit ca9b073

ilyacodes authored and facebook-github-bot committed

Delete Separa

Summary: We have conclusively determined that Separa as designed is not a reliable solution for the problem it was trying to solve. In particular, it behaved poorly in cases where the backhaul connectivity was flaky. We are moving forward with OWMP/"a12s", so delete the old code.

Reviewed By: ammubhave

Differential Revision: D15911067

fbshipit-source-id: f43e956333a2dea3d53490dacaf2f97c95b9efe7
1 parent 6ebb032 commit ca9b073

File tree

6 files changed: +9 -757 lines changed


fbmeshd.cmake (-1)

@@ -82,7 +82,6 @@ add_executable(fbmeshd
   openr/fbmeshd/routing/Routing.cpp
   openr/fbmeshd/routing/SyncRoutes80211s.cpp
   openr/fbmeshd/routing/UDPRoutingPacketTransport.cpp
-  openr/fbmeshd/separa/Separa.cpp
   $<TARGET_OBJECTS:fbmeshd-cpp2-obj>
 )

openr/fbmeshd/main.cpp (+8 -44)

@@ -39,7 +39,6 @@
 #include <openr/fbmeshd/routing/Routing.h>
 #include <openr/fbmeshd/routing/SyncRoutes80211s.h>
 #include <openr/fbmeshd/routing/UDPRoutingPacketTransport.h>
-#include <openr/fbmeshd/separa/Separa.h>
 #include <openr/watchdog/Watchdog.h>
 
 using namespace openr::fbmeshd;
@@ -86,26 +85,27 @@ DEFINE_bool(
 DEFINE_bool(
     enable_separa,
     false,
-    "If set, Separa algorithm will be enabled to manage mesh partitions. "
-    "Implies enable_separa_broadcasts=true");
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 DEFINE_int32(
-    separa_hello_port, 6667, "The port used to send separa hello packets");
+    separa_hello_port,
+    6667,
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 DEFINE_int32(
     separa_broadcast_interval_s,
     1,
-    "how often to send separa broadcasts in seconds");
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 DEFINE_int32(
     separa_domain_lockdown_period_s,
     60,
-    "how long to lockdown domains in seconds");
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 DEFINE_double(
     separa_domain_change_threshold_factor,
     1,
-    "threshold factor for doing a separa domain change");
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 DEFINE_bool(
     enable_separa_broadcasts,
     true,
-    "If set, Separa broadcasts will be enabled");
+    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");
 
 DEFINE_int32(
     decision_rep_port,
@@ -358,7 +358,6 @@ main(int argc, char* argv[]) {
         FLAGS_mesh_ifname,
         kvStoreLocalCmdUrl,
         kvStoreLocalPubUrl,
-        FLAGS_enable_separa,
         zmqContext);
   }
 
@@ -389,36 +388,6 @@ main(int argc, char* argv[]) {
       !(FLAGS_enable_event_based_peer_selector &&
         FLAGS_enable_userspace_mesh_peering)};
 
-  std::unique_ptr<Separa> separa;
-  if (meshSpark != nullptr && FLAGS_is_openr_enabled &&
-      (FLAGS_enable_separa_broadcasts || FLAGS_enable_separa)) {
-    separa = std::make_unique<Separa>(
-        FLAGS_separa_hello_port,
-        std::chrono::seconds{FLAGS_separa_broadcast_interval_s},
-        std::chrono::seconds{FLAGS_separa_domain_lockdown_period_s},
-        FLAGS_separa_domain_change_threshold_factor,
-        !FLAGS_enable_separa,
-        nlHandler,
-        *meshSpark,
-        prefixManagerLocalCmdUrl,
-        decisionCmdUrl,
-        kvStoreLocalCmdUrl,
-        kvStoreLocalPubUrl,
-        monitorSubmitUrl,
-        zmqContext);
-
-    static constexpr auto separaId{"Separa"};
-    monitorEventLoopWithWatchdog(separa.get(), separaId, watchdog.get());
-
-    allThreads.emplace_back(std::thread([&separa]() noexcept {
-      LOG(INFO) << "Starting the Separa thread...";
-      folly::setThreadName(separaId);
-      separa->run();
-      LOG(INFO) << "Separa thread got stopped.";
-    }));
-    separa->waitUntilRunning();
-  }
-
   std::unique_ptr<Gateway11sRootRouteProgrammer> gateway11sRootRouteProgrammer;
   static constexpr auto gateway11sRootRouteProgrammerId{
       "Gateway11sRootRouteProgrammer"};
@@ -565,11 +534,6 @@ main(int argc, char* argv[]) {
   gatewayConnectivityMonitor.stop();
   gatewayConnectivityMonitor.waitUntilStopped();
 
-  if (separa) {
-    separa->stop();
-    separa->waitUntilStopped();
-  }
-
   if (routingEventLoop) {
     routing->resetSendPacketCallback();
     routingEventLoop->terminateLoopSoon();
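Note on the flag changes above: rather than deleting the Separa gflags outright, the diff keeps each flag definition and only rewrites its help string to "DEPRECATED; ...", so hosts that still pass flags such as --enable_separa continue to start cleanly while the flags are drained from launch configs. A minimal standalone sketch of that deprecation pattern, assuming gflags; this is an illustrative stub, not code from this commit:

#include <gflags/gflags.h>
#include <iostream>

// Flag kept only so existing launch configs that still pass
// --enable_separa keep parsing; nothing reads its value any more.
DEFINE_bool(
    enable_separa,
    false,
    "DEPRECATED; TODO: delete after Separa is disabled on all meshes");

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Passing --enable_separa is now a harmless no-op.
  std::cout << "started; deprecated flags accepted but ignored" << std::endl;
  return 0;
}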

openr/fbmeshd/mesh-spark/MeshSpark.cpp (+1 -85)

@@ -47,7 +47,6 @@ MeshSpark::MeshSpark(
     const std::string& ifName,
     const openr::KvStoreLocalCmdUrl& kvStoreLocalCmdUrl,
     const openr::KvStoreLocalPubUrl& kvStoreLocalPubUrl,
-    bool enableDomains,
     fbzmq::Context& zmqContext)
     : zmqLoop_{zmqLoop},
       zmqContext_{zmqContext},
@@ -66,8 +65,7 @@ MeshSpark::MeshSpark(
           &zmqLoop,
           "node1", /* nodeId is used for writing to kvstore. not used herei */
           kvStoreLocalCmdUrl,
-          kvStoreLocalPubUrl),
-      enableDomains_{enableDomains} {
+          kvStoreLocalPubUrl) {
   syncPeersTimer_ = fbzmq::ZmqTimeout::make(
       &zmqLoop_, [this]() mutable noexcept { syncPeers(); });
   syncPeersTimer_->scheduleTimeout(syncPeersInterval_, true);
@@ -246,45 +244,6 @@ MeshSpark::filterWhiteListedPeers(std::vector<folly::MacAddress>& peers) {
   peers = whiteListedPeers;
 }
 
-void
-MeshSpark::filterInDomainPeers(std::vector<folly::MacAddress>& peers) {
-  if (!enableDomains_) {
-    return;
-  }
-  std::vector<folly::MacAddress> inDomainPeers;
-  const auto myDomain = *myDomain_.rlock();
-  neighborDomainCache_.withWLock([&peers, &inDomainPeers, myDomain](
-                                     auto& neighborDomainCache_) {
-    // Cleanup expired peer info in the cache
-    std::unordered_set<folly::MacAddress> peerSet(peers.begin(), peers.end());
-    std::unordered_set<folly::MacAddress> keysInCache;
-    for (const auto& it : neighborDomainCache_) {
-      keysInCache.emplace(it.first);
-    }
-    for (const auto& key : keysInCache) {
-      if (peerSet.count(key) == 0) {
-        neighborDomainCache_.erase(key);
-      }
-    }
-
-    // filter out peers that don't belong to our domain
-    std::copy_if(
-        peers.begin(),
-        peers.end(),
-        std::back_inserter(inDomainPeers),
-        [&neighborDomainCache_, myDomain](const auto& peer) {
-          if (neighborDomainCache_.count(peer) == 0) {
-            return false;
-          }
-          const auto peerDomainAndEnabled = neighborDomainCache_.at(peer);
-          return !peerDomainAndEnabled.second ||
-              (peerDomainAndEnabled.first.hasValue() && myDomain.hasValue() &&
-               peerDomainAndEnabled.first == myDomain);
-        });
-  });
-  peers = inDomainPeers;
-}
-
 void
 MeshSpark::syncPeers() {
   VLOG(1) << folly::sformat("MeshSpark::{}()", __func__);
@@ -299,9 +258,6 @@ MeshSpark::syncPeers() {
     }
   }
 
-  // remove peers that are not in this node's domain
-  filterInDomainPeers(activePeers);
-
   // remove peers that are not white-listed
   filterWhiteListedPeers(activePeers);
 
@@ -352,43 +308,3 @@ MeshSpark::syncPeers() {
     }
   }
 }
-
-folly::Optional<folly::MacAddress>
-MeshSpark::getDomain() {
-  return *myDomain_.rlock();
-}
-
-void
-MeshSpark::setDomain(folly::Optional<folly::MacAddress> newDomain) {
-  myDomain_.withWLock([this, newDomain](auto& myDomain_) {
-    myDomain_ = newDomain;
-    zmqLoop_.runImmediatelyOrInEventLoop([this]() { syncPeers(); });
-  });
-}
-
-void
-MeshSpark::updateCache(
-    folly::MacAddress node,
-    std::pair<folly::Optional<folly::MacAddress>, bool> domain) {
-  std::pair<folly::Optional<folly::MacAddress>, bool> oldDomain;
-  neighborDomainCache_.withWLock(
-      [domain, node, &oldDomain](auto& neighborDomainCache_) {
-        auto it = neighborDomainCache_.find(node);
-        if (it != neighborDomainCache_.end()) {
-          oldDomain = it->second;
-          it->second = domain;
-        } else {
-          neighborDomainCache_[node] = domain;
-        }
-      });
-  if (!domain.second) {
-    return;
-  }
-  myDomain_.withRLock([this, oldDomain, domain](auto& myDomain_) {
-    if (myDomain_.hasValue() &&
-        (domain.first == myDomain_ || oldDomain.first == myDomain_) &&
-        domain.first != oldDomain.first) {
-      zmqLoop_.runImmediatelyOrInEventLoop([this]() { syncPeers(); });
-    }
-  });
-}
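The domain-tracking code removed above guarded myDomain_ and neighborDomainCache_ with folly::Synchronized, reading via rlock()/withRLock() and mutating via withWLock(). A small self-contained sketch of that access pattern, assuming folly is available; the map below is a stand-in for illustration, not MeshSpark's actual cache:

#include <folly/Synchronized.h>

#include <iostream>
#include <string>
#include <unordered_map>

int main() {
  // Shared state wrapped in folly::Synchronized, as the deleted
  // neighborDomainCache_ member was.
  folly::Synchronized<std::unordered_map<std::string, int>> cache;

  // withWLock() runs the lambda while holding the write lock.
  cache.withWLock([](auto& map) { map["peer-a"] = 1; });

  // rlock() returns an RAII view that holds the read lock for its lifetime.
  {
    auto view = cache.rlock();
    std::cout << "entries: " << view->size() << std::endl;
  }

  // withRLock() is the read-side counterpart of withWLock().
  cache.withRLock([](const auto& map) {
    std::cout << "peer-a present: " << map.count("peer-a") << std::endl;
  });
  return 0;
}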

openr/fbmeshd/mesh-spark/MeshSpark.h (-23)

@@ -34,17 +34,8 @@ class MeshSpark final {
       const std::string& ifName,
       const openr::KvStoreLocalCmdUrl& kvStoreLocalCmdUrl,
       const openr::KvStoreLocalPubUrl& kvStoreLocalPubUrl,
-      bool enableDomains,
       fbzmq::Context& zmqContext);
 
-  folly::Optional<folly::MacAddress> getDomain();
-
-  void setDomain(folly::Optional<folly::MacAddress> newDomain);
-
-  void updateCache(
-      folly::MacAddress node,
-      std::pair<folly::Optional<folly::MacAddress>, bool> domain);
-
  private:
   /**
    * bind/connect to openr sockets
@@ -84,8 +75,6 @@ class MeshSpark final {
 
   void filterWhiteListedPeers(std::vector<folly::MacAddress>& peers);
 
-  void filterInDomainPeers(std::vector<folly::MacAddress>& peers);
-
   // ZmqEventLoop pointer for scheduling async events and socket callback
   // registration
   fbzmq::ZmqEventLoop& zmqLoop_;
@@ -124,16 +113,4 @@ class MeshSpark final {
   // node name -> ipv4 address
   std::unordered_map<folly::MacAddress, folly::IPAddressV4> kvStoreIPs_;
 
-  // enable domain filtering?
-  const bool enableDomains_;
-
-  // Stores the current domain, nodes in the same domain can for OpenR peerings
-  folly::Synchronized<folly::Optional<folly::MacAddress>> myDomain_;
-
-  // Cache for checking which nodes belong to which domain
-  folly::Synchronized<std::unordered_map<
-      folly::MacAddress,
-      std::pair<folly::Optional<folly::MacAddress>, bool>>>
-      neighborDomainCache_;
-
 }; // MeshSpark