From a45f3419d066319e5bfd9ae08b9f502ddef4c651 Mon Sep 17 00:00:00 2001 From: Alyssa Wilk Date: Mon, 4 Dec 2017 10:34:49 -0500 Subject: [PATCH] Passing priority sets to loadbalancers Signed-off-by: Alyssa Wilk --- .../common/upstream/cluster_manager_impl.cc | 31 +++-- source/common/upstream/load_balancer_impl.cc | 22 ++-- source/common/upstream/load_balancer_impl.h | 22 ++-- .../common/upstream/original_dst_cluster.cc | 10 +- source/common/upstream/original_dst_cluster.h | 4 +- source/common/upstream/ring_hash_lb.cc | 5 +- source/common/upstream/ring_hash_lb.h | 2 +- source/common/upstream/subset_lb.cc | 81 +++++++++---- source/common/upstream/subset_lb.h | 45 +++++-- source/common/upstream/upstream_impl.cc | 7 +- source/common/upstream/upstream_impl.h | 10 +- test/common/upstream/eds_test.cc | 2 +- .../upstream/load_balancer_impl_test.cc | 58 ++++----- .../upstream/load_balancer_simulation_test.cc | 14 ++- .../upstream/original_dst_cluster_test.cc | 27 ++--- test/common/upstream/ring_hash_lb_test.cc | 48 ++++---- test/common/upstream/subset_lb_test.cc | 113 ++++++++++++------ test/common/upstream/upstream_impl_test.cc | 1 + 18 files changed, 306 insertions(+), 196 deletions(-) diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index a05038e3fa12..a965b0158f61 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -610,43 +610,40 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( parent.parent_.local_info_, parent.parent_, parent.parent_.runtime_, parent.parent_.random_, Router::ShadowWriterPtr{new Router::ShadowWriterImpl(parent.parent_)}) { - - // TODO(alyssawilk) make lb priority-set aware in a follow-up patch. 
- HostSet& host_set = priority_set_.getOrCreateHostSet(0); - HostSet* local_host_set = nullptr; - if (parent.local_priority_set_) { - local_host_set = parent.local_priority_set_->hostSetsPerPriority()[0].get(); - } + priority_set_.getOrCreateHostSet(0); if (cluster->lbSubsetInfo().isEnabled()) { - lb_.reset(new SubsetLoadBalancer(cluster->lbType(), host_set, local_host_set, cluster->stats(), - parent.parent_.runtime_, parent.parent_.random_, - cluster->lbSubsetInfo(), cluster->lbRingHashConfig())); + lb_.reset(new SubsetLoadBalancer(cluster->lbType(), priority_set_, parent_.local_priority_set_, + cluster->stats(), parent.parent_.runtime_, + parent.parent_.random_, cluster->lbSubsetInfo(), + cluster->lbRingHashConfig())); } else { switch (cluster->lbType()) { case LoadBalancerType::LeastRequest: { - lb_.reset(new LeastRequestLoadBalancer(host_set, local_host_set, cluster->stats(), - parent.parent_.runtime_, parent.parent_.random_)); + lb_.reset(new LeastRequestLoadBalancer(priority_set_, parent_.local_priority_set_, + cluster->stats(), parent.parent_.runtime_, + parent.parent_.random_)); break; } case LoadBalancerType::Random: { - lb_.reset(new RandomLoadBalancer(host_set, local_host_set, cluster->stats(), + lb_.reset(new RandomLoadBalancer(priority_set_, parent_.local_priority_set_, cluster->stats(), parent.parent_.runtime_, parent.parent_.random_)); break; } case LoadBalancerType::RoundRobin: { - lb_.reset(new RoundRobinLoadBalancer(host_set, local_host_set, cluster->stats(), - parent.parent_.runtime_, parent.parent_.random_)); + lb_.reset(new RoundRobinLoadBalancer(priority_set_, parent_.local_priority_set_, + cluster->stats(), parent.parent_.runtime_, + parent.parent_.random_)); break; } case LoadBalancerType::RingHash: { - lb_.reset(new RingHashLoadBalancer(host_set, cluster->stats(), parent.parent_.runtime_, + lb_.reset(new RingHashLoadBalancer(priority_set_, cluster->stats(), parent.parent_.runtime_, parent.parent_.random_, cluster->lbRingHashConfig())); break; } case LoadBalancerType::OriginalDst: { lb_.reset(new OriginalDstCluster::LoadBalancer( - host_set, parent.parent_.primary_clusters_.at(cluster->name()).cluster_)); + priority_set_, parent.parent_.primary_clusters_.at(cluster->name()).cluster_)); break; } } diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index a8a01da8c30b..daf0ff1dd36b 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -17,11 +17,13 @@ static const std::string RuntimeZoneEnabled = "upstream.zone_routing.enabled"; static const std::string RuntimeMinClusterSize = "upstream.zone_routing.min_cluster_size"; static const std::string RuntimePanicThreshold = "upstream.healthy_panic_threshold"; -LoadBalancerBase::LoadBalancerBase(const HostSet& host_set, const HostSet* local_host_set, - ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random) - : stats_(stats), runtime_(runtime), random_(random), host_set_(host_set), - local_host_set_(local_host_set) { +LoadBalancerBase::LoadBalancerBase(const PrioritySet& priority_set, + const PrioritySet* local_priority_set, ClusterStats& stats, + Runtime::Loader& runtime, Runtime::RandomGenerator& random) + : stats_(stats), runtime_(runtime), random_(random), + host_set_(*priority_set.hostSetsPerPriority()[0]), + local_host_set_(local_priority_set ? 
local_priority_set->hostSetsPerPriority()[0].get() + : nullptr) { if (local_host_set_) { host_set_.addMemberUpdateCb([this](uint32_t, const std::vector&, const std::vector&) -> void { @@ -243,13 +245,13 @@ HostConstSharedPtr RoundRobinLoadBalancer::chooseHost(LoadBalancerContext*) { return hosts_to_use[rr_index_++ % hosts_to_use.size()]; } -LeastRequestLoadBalancer::LeastRequestLoadBalancer(const HostSet& host_set, - const HostSet* local_host_set, +LeastRequestLoadBalancer::LeastRequestLoadBalancer(const PrioritySet& priority_set, + const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random) - : LoadBalancerBase(host_set, local_host_set, stats, runtime, random) { - host_set.addMemberUpdateCb([this](uint32_t, const std::vector&, - const std::vector& hosts_removed) -> void { + : LoadBalancerBase(priority_set, local_priority_set, stats, runtime, random) { + host_set_.addMemberUpdateCb([this](uint32_t, const std::vector&, + const std::vector& hosts_removed) -> void { if (last_host_) { for (const HostSharedPtr& host : hosts_removed) { if (host == last_host_) { diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 91f74b9d613b..b804715834ec 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -31,8 +31,8 @@ class LoadBalancerUtility { */ class LoadBalancerBase { protected: - LoadBalancerBase(const HostSet& host_set, const HostSet* local_host_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random); + LoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set, + ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random); ~LoadBalancerBase(); /** @@ -44,6 +44,10 @@ class LoadBalancerBase { Runtime::Loader& runtime_; Runtime::RandomGenerator& random_; + // TODO(alyssawilk) make load balancers priority-aware and remove. 
+protected: + const HostSet& host_set_; + private: enum class LocalityRoutingState { NoLocalityRouting, LocalityDirect, LocalityResidual }; @@ -72,7 +76,6 @@ class LoadBalancerBase { */ void regenerateLocalityRoutingStructures(); - const HostSet& host_set_; const HostSet* local_host_set_; uint64_t local_percent_to_route_{}; LocalityRoutingState locality_routing_state_{LocalityRoutingState::NoLocalityRouting}; @@ -85,10 +88,10 @@ class LoadBalancerBase { */ class RoundRobinLoadBalancer : public LoadBalancer, LoadBalancerBase { public: - RoundRobinLoadBalancer(const HostSet& host_set, const HostSet* local_host_set_, + RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random) - : LoadBalancerBase(host_set, local_host_set_, stats, runtime, random) {} + : LoadBalancerBase(priority_set, local_priority_set, stats, runtime, random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; @@ -112,7 +115,7 @@ class RoundRobinLoadBalancer : public LoadBalancer, LoadBalancerBase { */ class LeastRequestLoadBalancer : public LoadBalancer, LoadBalancerBase { public: - LeastRequestLoadBalancer(const HostSet& host_set, const HostSet* local_host_set_, + LeastRequestLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random); @@ -129,9 +132,10 @@ class LeastRequestLoadBalancer : public LoadBalancer, LoadBalancerBase { */ class RandomLoadBalancer : public LoadBalancer, LoadBalancerBase { public: - RandomLoadBalancer(const HostSet& host_set, const HostSet* local_host_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random) - : LoadBalancerBase(host_set, local_host_set, stats, runtime, random) {} + RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, + ClusterStats& stats, Runtime::Loader& runtime, + Runtime::RandomGenerator& random) + : LoadBalancerBase(priority_set, local_priority_set, stats, runtime, random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 02ab5ce49579..4245656a4100 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -19,12 +19,12 @@ namespace Upstream { // OriginalDstCluster::LoadBalancer is never configured with any other type of cluster, // and throws an exception otherwise. -OriginalDstCluster::LoadBalancer::LoadBalancer(HostSet& host_set, ClusterSharedPtr& parent) - : host_set_(host_set), parent_(std::static_pointer_cast(parent)), +OriginalDstCluster::LoadBalancer::LoadBalancer(PrioritySet& priority_set, ClusterSharedPtr& parent) + : priority_set_(priority_set), parent_(std::static_pointer_cast(parent)), info_(parent->info()) { - // host_set_ is initially empty. - host_set_.addMemberUpdateCb([this](uint32_t, const std::vector& hosts_added, - const std::vector& hosts_removed) -> void { + // priority_set_ is initially empty. 
+ priority_set_.addMemberUpdateCb([this](uint32_t, const std::vector& hosts_added, + const std::vector& hosts_removed) -> void { // Update the hosts map for (const HostSharedPtr& host : hosts_removed) { ENVOY_LOG(debug, "Removing host {}.", host->address()->asString()); diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index f5cf036de7a2..0fcd723b39bf 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -42,7 +42,7 @@ class OriginalDstCluster : public ClusterImplBase { */ class LoadBalancer : public Upstream::LoadBalancer { public: - LoadBalancer(HostSet& host_set, ClusterSharedPtr& parent); + LoadBalancer(PrioritySet& priority_set, ClusterSharedPtr& parent); // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; @@ -91,7 +91,7 @@ class OriginalDstCluster : public ClusterImplBase { std::unordered_multimap map_; }; - HostSet& host_set_; // Thread local host set. + PrioritySet& priority_set_; // Thread local priority set. std::weak_ptr parent_; // Primary cluster managed by the main thread. ClusterInfoConstSharedPtr info_; HostMap host_map_; diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index fd34fab772fe..36673d0f5014 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -11,10 +11,11 @@ namespace Envoy { namespace Upstream { RingHashLoadBalancer::RingHashLoadBalancer( - HostSet& host_set, ClusterStats& stats, Runtime::Loader& runtime, + PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const Optional& config) - : host_set_(host_set), stats_(stats), runtime_(runtime), random_(random), config_(config) { + : host_set_(*priority_set.hostSetsPerPriority()[0]), stats_(stats), runtime_(runtime), + random_(random), config_(config) { host_set_.addMemberUpdateCb([this](uint32_t, const std::vector&, const std::vector&) -> void { refresh(); }); diff --git a/source/common/upstream/ring_hash_lb.h b/source/common/upstream/ring_hash_lb.h index da739a06f36b..793879504d9b 100644 --- a/source/common/upstream/ring_hash_lb.h +++ b/source/common/upstream/ring_hash_lb.h @@ -22,7 +22,7 @@ namespace Upstream { */ class RingHashLoadBalancer : public LoadBalancer, Logger::Loggable { public: - RingHashLoadBalancer(HostSet& host_set, ClusterStats& stats, Runtime::Loader& runtime, + RingHashLoadBalancer(PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const Optional& config); diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index c41950bde452..f6d89a68d616 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -17,23 +17,26 @@ namespace Envoy { namespace Upstream { SubsetLoadBalancer::SubsetLoadBalancer( - LoadBalancerType lb_type, HostSet& host_set, const HostSet* local_host_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set, + ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, const Optional& lb_ring_hash_config) : lb_type_(lb_type), lb_ring_hash_config_(lb_ring_hash_config), stats_(stats), runtime_(runtime), random_(random), fallback_policy_(subsets.fallbackPolicy()), 
default_subset_(subsets.defaultSubset()), subset_keys_(subsets.subsetKeys()), - original_host_set_(host_set), original_local_host_set_(local_host_set) { + original_priority_set_(priority_set), original_local_priority_set_(local_priority_set) { ASSERT(subsets.isEnabled()); // Create filtered default subset (if necessary) and other subsets based on current hosts. - update(host_set.hosts(), {}); + for (auto& host_set : priority_set.hostSetsPerPriority()) { + update(host_set->priority(), host_set->hosts(), {}); + } // Configure future updates. - host_set.addMemberUpdateCb([this](uint32_t, const std::vector& hosts_added, - const std::vector& hosts_removed) { - update(hosts_added, hosts_removed); + priority_set.addMemberUpdateCb([this](uint32_t priority, + const std::vector& hosts_added, + const std::vector& hosts_removed) { + update(priority, hosts_added, hosts_removed); }); } @@ -118,7 +121,8 @@ SubsetLoadBalancer::LbSubsetEntryPtr SubsetLoadBalancer::findSubset( return nullptr; } -void SubsetLoadBalancer::updateFallbackSubset(const std::vector& hosts_added, +void SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, + const std::vector& hosts_added, const std::vector& hosts_removed) { if (fallback_policy_ == envoy::api::v2::Cluster::LbSubsetConfig::NO_FALLBACK) { return; @@ -139,7 +143,7 @@ void SubsetLoadBalancer::updateFallbackSubset(const std::vector& fallback_subset_->initLoadBalancer(*this, predicate); } else { // Subsequent updates: add/remove hosts. - fallback_subset_->host_subset_->update(hosts_added, hosts_removed, predicate); + fallback_subset_->priority_subset_->update(priority, hosts_added, hosts_removed, predicate); } } @@ -181,17 +185,18 @@ void SubsetLoadBalancer::processSubsets( } } -// Given the addition and/or removal of hosts, update all subsets, creating new subsets as -// necessary. -void SubsetLoadBalancer::update(const std::vector& hosts_added, +// Given the addition and/or removal of hosts, update all subsets for this priority level, creating +// new subsets as necessary. +void SubsetLoadBalancer::update(uint32_t priority, const std::vector& hosts_added, const std::vector& hosts_removed) { - updateFallbackSubset(hosts_added, hosts_removed); + updateFallbackSubset(priority, hosts_added, hosts_removed); processSubsets(hosts_added, hosts_removed, [&](LbSubsetEntryPtr entry, HostPredicate predicate, bool adding_host) { if (entry->initialized()) { const bool active_before = entry->active(); - entry->host_subset_->update(hosts_added, hosts_removed, predicate); + entry->priority_subset_->update(priority, hosts_added, hosts_removed, + predicate); if (active_before && !entry->active()) { stats_.lb_subsets_active_.dec(); @@ -316,28 +321,31 @@ SubsetLoadBalancer::findOrCreateSubset(LbSubsetMap& subsets, const SubsetMetadat // with the given predicate. 
void SubsetLoadBalancer::LbSubsetEntry::initLoadBalancer(const SubsetLoadBalancer& subset_lb, HostPredicate predicate) { - host_subset_.reset(new HostSubsetImpl(subset_lb.original_host_set_)); - host_subset_->update(subset_lb.original_host_set_.hosts(), {}, predicate); + priority_subset_.reset(new PrioritySubsetImpl(subset_lb.original_priority_set_)); + for (size_t i = 0; i < subset_lb.original_priority_set_.hostSetsPerPriority().size(); ++i) { + priority_subset_->update(i, subset_lb.original_priority_set_.hostSetsPerPriority()[i]->hosts(), + {}, predicate); + } switch (subset_lb.lb_type_) { case LoadBalancerType::LeastRequest: - lb_.reset(new LeastRequestLoadBalancer(*host_subset_, subset_lb.original_local_host_set_, - subset_lb.stats_, subset_lb.runtime_, - subset_lb.random_)); + lb_.reset(new LeastRequestLoadBalancer(*priority_subset_, + subset_lb.original_local_priority_set_, subset_lb.stats_, + subset_lb.runtime_, subset_lb.random_)); break; case LoadBalancerType::Random: - lb_.reset(new RandomLoadBalancer(*host_subset_, subset_lb.original_local_host_set_, + lb_.reset(new RandomLoadBalancer(*priority_subset_, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, subset_lb.random_)); break; case LoadBalancerType::RoundRobin: - lb_.reset(new RoundRobinLoadBalancer(*host_subset_, subset_lb.original_local_host_set_, + lb_.reset(new RoundRobinLoadBalancer(*priority_subset_, subset_lb.original_local_priority_set_, subset_lb.stats_, subset_lb.runtime_, subset_lb.random_)); break; case LoadBalancerType::RingHash: - lb_.reset(new RingHashLoadBalancer(*host_subset_, subset_lb.stats_, subset_lb.runtime_, + lb_.reset(new RingHashLoadBalancer(*priority_subset_, subset_lb.stats_, subset_lb.runtime_, subset_lb.random_, subset_lb.lb_ring_hash_config_)); break; @@ -345,7 +353,7 @@ void SubsetLoadBalancer::LbSubsetEntry::initLoadBalancer(const SubsetLoadBalance NOT_REACHED; } - host_subset_->triggerCallbacks(); + priority_subset_->triggerCallbacks(); } // Given hosts_added and hosts_removed, update the underlying HostSet. 
The hosts_added Hosts must @@ -406,5 +414,32 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const std::vector filtered_added, filtered_removed); } +SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const PrioritySet& original_priority_set) + : PrioritySetImpl(), original_priority_set_(original_priority_set) { + for (size_t i = 0; i < original_priority_set_.hostSetsPerPriority().size(); ++i) { + empty_ &= getOrCreateHostSet(i).hosts().empty(); + } +} + +HostSetPtr SubsetLoadBalancer::PrioritySubsetImpl::createHostSet(uint32_t priority) { + RELEASE_ASSERT(priority < original_priority_set_.hostSetsPerPriority().size()); + return HostSetPtr{new HostSubsetImpl(*original_priority_set_.hostSetsPerPriority()[priority])}; +} + +void SubsetLoadBalancer::PrioritySubsetImpl::update(uint32_t priority, + const std::vector& hosts_added, + const std::vector& hosts_removed, + std::function predicate) { + HostSubsetImpl* host_subset = getOrCreateHostSubset(priority); + host_subset->update(hosts_added, hosts_removed, predicate); + + if (host_subset->hosts().empty() != empty_) { + empty_ = true; + for (auto& host_set : hostSetsPerPriority()) { + empty_ &= host_set->hosts().empty(); + } + } +} + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index eb90c30c52ed..5ee393bfe100 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -20,7 +20,7 @@ namespace Upstream { class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable { public: SubsetLoadBalancer( - LoadBalancerType lb_type, HostSet& host_set, const HostSet* local_host_set, + LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, const Optional& lb_ring_hash_config); @@ -47,7 +47,36 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& hosts_added, + const std::vector& hosts_removed, HostPredicate predicate); + + bool empty() { return empty_; } + + HostSubsetImpl* getOrCreateHostSubset(uint32_t priority) { + return reinterpret_cast(&getOrCreateHostSet(priority)); + } + + void triggerCallbacks() { + for (size_t i = 0; i < hostSetsPerPriority().size(); ++i) { + getOrCreateHostSubset(i)->triggerCallbacks(); + } + } + + protected: + HostSetPtr createHostSet(uint32_t priority) override; + + private: + const PrioritySet& original_priority_set_; + bool empty_ = true; + }; + typedef std::shared_ptr HostSubsetImplPtr; + typedef std::shared_ptr PrioritySubsetImplPtr; typedef std::vector> SubsetMetadata; @@ -61,23 +90,23 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggableempty(); } + bool initialized() const { return lb_ != nullptr && priority_subset_ != nullptr; } + bool active() const { return initialized() && !priority_subset_->empty(); } void initLoadBalancer(const SubsetLoadBalancer& subset_lb, HostPredicate predicate); LbSubsetMap children_; // Only initialized if a match exists at this level. 
- HostSubsetImplPtr host_subset_; + PrioritySubsetImplPtr priority_subset_; LoadBalancerPtr lb_; }; // Called by HostSet::MemberUpdateCb - void update(const std::vector& hosts_added, + void update(uint32_t priority, const std::vector& hosts_added, const std::vector& hosts_removed); - void updateFallbackSubset(const std::vector& hosts_added, + void updateFallbackSubset(uint32_t priority, const std::vector& hosts_added, const std::vector& hosts_removed); void processSubsets(const std::vector& hosts_added, const std::vector& hosts_removed, @@ -106,8 +135,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable> subset_keys_; - const HostSet& original_host_set_; - const HostSet* original_local_host_set_; + const PrioritySet& original_priority_set_; + const PrioritySet* original_local_priority_set_; LbSubsetEntryPtr fallback_subset_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 5c48ad56a2f4..b257cf40f5c0 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -67,12 +67,10 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, std::min(128U, new_weight)); } -PrioritySetImpl::PrioritySetImpl() { getOrCreateHostSet(0); } - HostSet& PrioritySetImpl::getOrCreateHostSet(uint32_t priority) { if (host_sets_.size() < priority + 1) { for (size_t i = host_sets_.size(); i <= priority; ++i) { - host_sets_.push_back(HostSetPtr{new HostSetImpl(i)}); + host_sets_.push_back(HostSetPtr{createHostSet(i)}); host_sets_[i]->addMemberUpdateCb([this](uint32_t priority, const std::vector& hosts_added, const std::vector& hosts_removed) { @@ -234,6 +232,9 @@ ClusterImplBase::ClusterImplBase(const envoy::api::v2::Cluster& cluster, Ssl::ContextManager& ssl_context_manager, bool added_via_api) : runtime_(runtime), info_(new ClusterInfoImpl(cluster, source_address, runtime, stats, ssl_context_manager, added_via_api)) { + // Create the default (empty) priority set before registering callbacks to + // avoid getting an update the first time it is accessed. + priority_set_.getOrCreateHostSet(0); priority_set_.addMemberUpdateCb([this](uint32_t, const std::vector& hosts_added, const std::vector& hosts_removed) { if (!hosts_added.empty() || !hosts_removed.empty()) { diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index d0049b45effa..615392f20ef3 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -231,8 +231,6 @@ typedef std::unique_ptr HostSetImplPtr; class PrioritySetImpl : public PrioritySet { public: - PrioritySetImpl(); - // From PrioritySet Common::CallbackHandle* addMemberUpdateCb(MemberUpdateCb callback) const override { return member_update_cb_helper_.add(callback); @@ -244,12 +242,18 @@ class PrioritySetImpl : public PrioritySet { // Get the host set for this priority level, creating it if necessary. HostSet& getOrCreateHostSet(uint32_t priority); +protected: + // Allows subclasses of PrioritySetImpl to create their own type of HostSet. + virtual HostSetPtr createHostSet(uint32_t priority) { + return HostSetPtr{new HostSetImpl(priority)}; + } + private: virtual void runUpdateCallbacks(uint32_t priority, const std::vector& hosts_added, const std::vector& hosts_removed) { member_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed); } - // This vector will always have at lest one member, for priority level 0. 
+ // This vector will generally have at least one member, for priority level 0. // It will expand as host sets are added but currently does not shrink to // avoid any potential lifetime issues. std::vector> host_sets_; diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 2d53d3919189..6a9e30ee73b5 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -330,7 +330,7 @@ TEST_F(EdsTest, EndpointHostsPerPriority) { EXPECT_EQ(4, cluster_->prioritySet().hostSetsPerPriority()[3]->hosts().size()); } -// Set up an EDS config with multiple priorities and localitie and make sure +// Set up an EDS config with multiple priorities and localities and make sure // they are loaded and reloaded as expected. TEST_F(EdsTest, PriorityAndLocality) { Protobuf::RepeatedPtrField resources; diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index e9e685777848..5246593868a4 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -22,29 +22,33 @@ using testing::Return; namespace Envoy { namespace Upstream { -class RoundRobinLoadBalancerTest : public testing::Test { -public: - RoundRobinLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} +class LoadBalancerTestBase : public testing::Test { +protected: + LoadBalancerTestBase() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} + Stats::IsolatedStoreImpl stats_store_; + ClusterStats stats_; + NiceMock runtime_; + NiceMock random_; + NiceMock priority_set_; + MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); + std::shared_ptr info_{new NiceMock()}; +}; +class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { +public: void init(bool need_local_cluster) { if (need_local_cluster) { - local_host_set_.reset(new HostSetImpl(0)); - lb_.reset( - new RoundRobinLoadBalancer(host_set_, local_host_set_.get(), stats_, runtime_, random_)); - } else { - lb_.reset(new RoundRobinLoadBalancer(host_set_, nullptr, stats_, runtime_, random_)); + local_priority_set_.reset(new PrioritySetImpl()); + local_host_set_ = reinterpret_cast(&local_priority_set_->getOrCreateHostSet(0)); } + lb_.reset(new RoundRobinLoadBalancer(priority_set_, local_priority_set_.get(), stats_, runtime_, + random_)); } - NiceMock runtime_; - NiceMock random_; - Stats::IsolatedStoreImpl stats_store_; - ClusterStats stats_; - NiceMock host_set_; - std::shared_ptr local_host_set_; + std::shared_ptr local_priority_set_; + HostSetImpl* local_host_set_{nullptr}; std::shared_ptr lb_; std::vector empty_host_vector_; - std::shared_ptr info_{new NiceMock()}; }; TEST_F(RoundRobinLoadBalancerTest, NoHosts) { @@ -363,17 +367,9 @@ TEST_F(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) { EXPECT_EQ(1U, stats_.lb_local_cluster_not_ok_.value()); } -class LeastRequestLoadBalancerTest : public testing::Test { +class LeastRequestLoadBalancerTest : public LoadBalancerTestBase { public: - LeastRequestLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} - - NiceMock runtime_; - NiceMock random_; - Stats::IsolatedStoreImpl stats_store_; - ClusterStats stats_; - NiceMock host_set_; - LeastRequestLoadBalancer lb_{host_set_, nullptr, stats_, runtime_, random_}; - std::shared_ptr info_{new NiceMock()}; + LeastRequestLoadBalancer lb_{priority_set_, nullptr, stats_, runtime_, random_}; }; TEST_F(LeastRequestLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } 
@@ -522,17 +518,9 @@ TEST_F(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { EXPECT_EQ(host_set_.healthy_hosts_[0], lb_.chooseHost(nullptr)); } -class RandomLoadBalancerTest : public testing::Test { +class RandomLoadBalancerTest : public LoadBalancerTestBase { public: - RandomLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} - - NiceMock host_set_; - std::shared_ptr info_{new NiceMock()}; - NiceMock runtime_; - NiceMock random_; - Stats::IsolatedStoreImpl stats_store_; - ClusterStats stats_; - RandomLoadBalancer lb_{host_set_, nullptr, stats_, runtime_, random_}; + RandomLoadBalancer lb_{priority_set_, nullptr, stats_, runtime_, random_}; }; TEST_F(RandomLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 026a33395e32..d8c8f1609dde 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -54,9 +54,9 @@ class DISABLED_SimulationTest : public testing::Test { */ void run(std::vector originating_cluster, std::vector all_destination_cluster, std::vector healthy_destination_cluster) { - local_host_set_ = new HostSetImpl(0); + local_priority_set_ = new PrioritySetImpl; // TODO(mattklein123): make load balancer per originating cluster host. - RandomLoadBalancer lb(host_set_, local_host_set_, stats_, runtime_, random_); + RandomLoadBalancer lb(priority_set_, local_priority_set_, stats_, runtime_, random_); HostListsSharedPtr upstream_per_zone_hosts = generateHostsPerZone(healthy_destination_cluster); HostListsSharedPtr local_per_zone_hosts = generateHostsPerZone(originating_cluster); @@ -95,8 +95,9 @@ class DISABLED_SimulationTest : public testing::Test { per_zone_local->push_back((*local_per_zone_hosts)[zone]); } - local_host_set_->updateHosts(originating_hosts, originating_hosts, per_zone_local, - per_zone_local, empty_vector_, empty_vector_); + local_priority_set_->getOrCreateHostSet(0).updateHosts(originating_hosts, originating_hosts, + per_zone_local, per_zone_local, + empty_vector_, empty_vector_); HostConstSharedPtr selected = lb.chooseHost(nullptr); hits[selected->address()->asString()]++; @@ -157,8 +158,9 @@ class DISABLED_SimulationTest : public testing::Test { const uint32_t total_number_of_requests = 1000000; std::vector empty_vector_; - HostSetImpl* local_host_set_; - NiceMock host_set_; + PrioritySetImpl* local_priority_set_; + NiceMock priority_set_; + MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); std::shared_ptr info_{new NiceMock()}; NiceMock runtime_; Runtime::RandomGeneratorImpl random_; diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 61ccc3295b87..5b9f1aa168aa 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -143,8 +143,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { // No downstream connection => no host. 
{ TestLoadBalancerContext lb_context(nullptr); - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], - cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); EXPECT_CALL(dispatcher_, post(_)).Times(0); HostConstSharedPtr host = lb.chooseHost(&lb_context); EXPECT_EQ(host, nullptr); @@ -159,8 +158,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { // First argument is normally the reference to the ThreadLocalCluster's HostSet, but in these // tests we do not have the thread local clusters, so we pass a reference to the HostSet of the // primary cluster. The implementation handles both cases the same. - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], - cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); EXPECT_CALL(dispatcher_, post(_)).Times(0); HostConstSharedPtr host = lb.chooseHost(&lb_context); EXPECT_EQ(host, nullptr); @@ -174,8 +172,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { EXPECT_CALL(connection, localAddress()).WillRepeatedly(ReturnRef(local_address)); EXPECT_CALL(connection, usingOriginalDst()).WillRepeatedly(Return(true)); - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], - cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); EXPECT_CALL(dispatcher_, post(_)).Times(0); HostConstSharedPtr host = lb.chooseHost(&lb_context); EXPECT_EQ(host, nullptr); @@ -212,7 +209,7 @@ TEST_F(OriginalDstClusterTest, Membership) { EXPECT_CALL(connection, localAddress()).WillRepeatedly(ReturnRef(local_address)); EXPECT_CALL(connection, usingOriginalDst()).WillRepeatedly(Return(true)); - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); Event::PostCb post_cb; EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); HostConstSharedPtr host = lb.chooseHost(&lb_context); @@ -306,7 +303,7 @@ TEST_F(OriginalDstClusterTest, Membership2) { EXPECT_CALL(connection2, localAddress()).WillRepeatedly(ReturnRef(local_address2)); EXPECT_CALL(connection2, usingOriginalDst()).WillRepeatedly(Return(true)); - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); EXPECT_CALL(membership_updated_, ready()); Event::PostCb post_cb; @@ -392,7 +389,7 @@ TEST_F(OriginalDstClusterTest, Connection) { EXPECT_CALL(connection, localAddress()).WillRepeatedly(ReturnRef(local_address)); EXPECT_CALL(connection, usingOriginalDst()).WillRepeatedly(Return(true)); - OriginalDstCluster::LoadBalancer lb(*cluster_->prioritySet().hostSetsPerPriority()[0], cluster_); + OriginalDstCluster::LoadBalancer lb(cluster_->prioritySet(), cluster_); Event::PostCb post_cb; EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); HostConstSharedPtr host = lb.chooseHost(&lb_context); @@ -419,7 +416,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { EXPECT_CALL(*cleanup_timer_, enableTimer(_)); setup(json); - HostSetImpl second(0); + PrioritySetImpl second; cluster_->prioritySet().hostSetsPerPriority()[0]->addMemberUpdateCb( [&](uint32_t, const std::vector& added, const std::vector& removed) -> void { @@ -431,8 +428,8 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { const HostListsConstSharedPtr empty_host_lists{ new std::vector>()}; - second.updateHosts(new_hosts, healthy_hosts, 
empty_host_lists, empty_host_lists, added, - removed); + second.getOrCreateHostSet(0).updateHosts(new_hosts, healthy_hosts, empty_host_lists, + empty_host_lists, added, removed); }); EXPECT_CALL(membership_updated_, ready()); @@ -444,7 +441,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { EXPECT_CALL(connection, localAddress()).WillRepeatedly(ReturnRef(local_address)); EXPECT_CALL(connection, usingOriginalDst()).WillRepeatedly(Return(true)); - OriginalDstCluster::LoadBalancer lb1(*cluster_->prioritySet().hostSetsPerPriority()[0], cluster_); + OriginalDstCluster::LoadBalancer lb1(cluster_->prioritySet(), cluster_); OriginalDstCluster::LoadBalancer lb2(second, cluster_); Event::PostCb post_cb; EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); @@ -455,10 +452,10 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); // Check that lb2 also gets updated - EXPECT_EQ(1UL, second.hosts().size()); + EXPECT_EQ(1UL, second.hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]); - EXPECT_EQ(host, second.hosts()[0]); + EXPECT_EQ(host, second.hostSetsPerPriority()[0]->hosts()[0]); } } // namespace OriginalDstClusterTest diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index cd9e7f87c4c4..ad8519cd5e60 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -37,18 +37,24 @@ class RingHashLoadBalancerTest : public testing::Test { public: RingHashLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} - NiceMock host_set_; + void init() { + lb_.reset(new RingHashLoadBalancer(priority_set_, stats_, runtime_, random_, config_)); + } + + NiceMock priority_set_; + MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); std::shared_ptr info_{new NiceMock()}; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; Optional config_; NiceMock runtime_; NiceMock random_; + std::unique_ptr lb_; }; TEST_F(RingHashLoadBalancerTest, NoHost) { - RingHashLoadBalancer lb{host_set_, stats_, runtime_, random_, config_}; - EXPECT_EQ(nullptr, lb.chooseHost(nullptr)); + init(); + EXPECT_EQ(nullptr, lb_->chooseHost(nullptr)); }; TEST_F(RingHashLoadBalancerTest, Basic) { @@ -63,7 +69,7 @@ TEST_F(RingHashLoadBalancerTest, Basic) { config_.value().mutable_minimum_ring_size()->set_value(12); config_.value().mutable_deprecated_v1()->mutable_use_std_hash()->set_value(false); - RingHashLoadBalancer lb{host_set_, stats_, runtime_, random_, config_}; + init(); // hash ring: // port | position @@ -83,23 +89,23 @@ TEST_F(RingHashLoadBalancerTest, Basic) { { TestLoadBalancerContext context(0); - EXPECT_EQ(host_set_.hosts_[4], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[4], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(std::numeric_limits::max()); - EXPECT_EQ(host_set_.hosts_[4], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[4], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(3551244743356806947); - EXPECT_EQ(host_set_.hosts_[5], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[5], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(3551244743356806948); - EXPECT_EQ(host_set_.hosts_[3], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context)); } { EXPECT_CALL(random_, random()).WillOnce(Return(16117243373044804880UL)); - 
EXPECT_EQ(host_set_.hosts_[0], lb.chooseHost(nullptr)); + EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(nullptr)); } EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value()); @@ -107,7 +113,7 @@ TEST_F(RingHashLoadBalancerTest, Basic) { host_set_.runCallbacks({}, {}); { TestLoadBalancerContext context(0); - EXPECT_EQ(host_set_.hosts_[4], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[4], lb_->chooseHost(&context)); } EXPECT_EQ(1UL, stats_.lb_healthy_panic_.value()); } @@ -127,7 +133,7 @@ TEST_F(RingHashLoadBalancerTest, BasicWithStdHash) { // use_std_hash defaults to true so don't set it here. config_.value(envoy::api::v2::Cluster::RingHashLbConfig()); config_.value().mutable_minimum_ring_size()->set_value(12); - RingHashLoadBalancer lb{host_set_, stats_, runtime_, random_, config_}; + init(); // This is the hash ring built using the default hash (probably murmur2) on GCC 5.4. // ring hash: host=127.0.0.1:85 hash=1358027074129602068 @@ -144,23 +150,23 @@ TEST_F(RingHashLoadBalancerTest, BasicWithStdHash) { // ring hash: host=127.0.0.1:80 hash=17613279263364193813 { TestLoadBalancerContext context(0); - EXPECT_EQ(host_set_.hosts_[5], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[5], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(std::numeric_limits::max()); - EXPECT_EQ(host_set_.hosts_[5], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[5], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(1358027074129602068); - EXPECT_EQ(host_set_.hosts_[5], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[5], lb_->chooseHost(&context)); } { TestLoadBalancerContext context(1358027074129602069); - EXPECT_EQ(host_set_.hosts_[3], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[3], lb_->chooseHost(&context)); } { EXPECT_CALL(random_, random()).WillOnce(Return(10150910876324007730UL)); - EXPECT_EQ(host_set_.hosts_[2], lb.chooseHost(nullptr)); + EXPECT_EQ(host_set_.hosts_[2], lb_->chooseHost(nullptr)); } EXPECT_EQ(0UL, stats_.lb_healthy_panic_.value()); } @@ -174,7 +180,7 @@ TEST_F(RingHashLoadBalancerTest, UnevenHosts) { config_.value(envoy::api::v2::Cluster::RingHashLbConfig()); config_.value().mutable_minimum_ring_size()->set_value(3); config_.value().mutable_deprecated_v1()->mutable_use_std_hash()->set_value(false); - RingHashLoadBalancer lb{host_set_, stats_, runtime_, random_, config_}; + init(); // hash ring: // port | position @@ -186,7 +192,7 @@ TEST_F(RingHashLoadBalancerTest, UnevenHosts) { { TestLoadBalancerContext context(0); - EXPECT_EQ(host_set_.hosts_[0], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context)); } host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81"), @@ -203,7 +209,7 @@ TEST_F(RingHashLoadBalancerTest, UnevenHosts) { { TestLoadBalancerContext context(0); - EXPECT_EQ(host_set_.hosts_[0], lb.chooseHost(&context)); + EXPECT_EQ(host_set_.hosts_[0], lb_->chooseHost(&context)); } } @@ -249,11 +255,11 @@ TEST_F(DISABLED_RingHashLoadBalancerTest, DetermineSpread) { config_.value(envoy::api::v2::Cluster::RingHashLbConfig()); config_.value().mutable_minimum_ring_size()->set_value(min_ring_size); config_.value().mutable_deprecated_v1()->mutable_use_std_hash()->set_value(false); - RingHashLoadBalancer lb{host_set_, stats_, runtime_, random_, config_}; + RingHashLoadBalancer lb{priority_set_, stats_, runtime_, random_, config_}; for (uint64_t i = 0; i < keys_to_simulate; i++) { TestLoadBalancerContext context(std::hash()(fmt::format("{}", i))); - 
hit_counter[lb.chooseHost(&context)->address()->asString()] += 1; + hit_counter[lb_->chooseHost(&context)->address()->asString()] += 1; } std::cout << fmt::format("{:<9} {:<4} {:<20}", "hits", "%hit", "server") << std::endl; diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 5b5b10b412e2..f883ee0336cc 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -95,21 +95,33 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { }); } - void init(const HostURLMetadataMap& host_metadata) { - EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true)); - + void configureHostSet(const HostURLMetadataMap& host_metadata, MockHostSet& host_set) { std::vector hosts; for (const auto& it : host_metadata) { hosts.emplace_back(makeHost(it.first, it.second)); } - host_set_.hosts_ = hosts; - host_set_.hosts_per_locality_ = std::vector>({hosts}); + host_set.hosts_ = hosts; + host_set.hosts_per_locality_ = std::vector>({hosts}); + host_set.healthy_hosts_ = host_set.hosts_; + host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_; + } - host_set_.healthy_hosts_ = host_set_.hosts_; - host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; + void init(const HostURLMetadataMap& host_metadata) { + HostURLMetadataMap failover; + init(host_metadata, failover); + } + + void init(const HostURLMetadataMap& host_metadata, + const HostURLMetadataMap& failover_host_metadata) { + EXPECT_CALL(subset_info_, isEnabled()).WillRepeatedly(Return(true)); - lb_.reset(new SubsetLoadBalancer(lb_type_, host_set_, nullptr, stats_, runtime_, random_, + configureHostSet(host_metadata, host_set_); + if (!failover_host_metadata.empty()) { + configureHostSet(failover_host_metadata, *priority_set_.getMockHostSet(1)); + } + + lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, runtime_, random_, subset_info_, ring_hash_lb_config_)); } @@ -147,12 +159,11 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { local_hosts_per_locality_->emplace_back(local_locality_hosts); } - local_host_set_.reset(new HostSetImpl(0)); - local_host_set_->updateHosts(local_hosts_, local_hosts_, local_hosts_per_locality_, - local_hosts_per_locality_, {}, {}); + local_priority_set_.getOrCreateHostSet(0).updateHosts( + local_hosts_, local_hosts_, local_hosts_per_locality_, local_hosts_per_locality_, {}, {}); - lb_.reset(new SubsetLoadBalancer(lb_type_, host_set_, local_host_set_.get(), stats_, runtime_, - random_, subset_info_, ring_hash_lb_config_)); + lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, &local_priority_set_, stats_, + runtime_, random_, subset_info_, ring_hash_lb_config_)); } HostSharedPtr makeHost(const std::string& url, const HostMetadata& metadata) { @@ -179,34 +190,35 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { } void modifyHosts(std::vector add, std::vector remove, - Optional add_in_locality = {}) { + Optional add_in_locality = {}, uint32_t priority = 0) { + MockHostSet& host_set = *priority_set_.getMockHostSet(priority); for (const auto& host : remove) { - auto it = std::find(host_set_.hosts_.begin(), host_set_.hosts_.end(), host); - if (it != host_set_.hosts_.end()) { - host_set_.hosts_.erase(it); + auto it = std::find(host_set.hosts_.begin(), host_set.hosts_.end(), host); + if (it != host_set.hosts_.end()) { + host_set.hosts_.erase(it); } - host_set_.healthy_hosts_ = host_set_.hosts_; + host_set.healthy_hosts_ = host_set.hosts_; - for (auto& 
locality_hosts : host_set_.hosts_per_locality_) { + for (auto& locality_hosts : host_set.hosts_per_locality_) { auto it = std::find(locality_hosts.begin(), locality_hosts.end(), host); if (it != locality_hosts.end()) { locality_hosts.erase(it); } } - host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; + host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_; } if (GetParam() == REMOVES_FIRST && !remove.empty()) { - host_set_.runCallbacks({}, remove); + host_set.runCallbacks({}, remove); } for (const auto& host : add) { - host_set_.hosts_.emplace_back(host); - host_set_.healthy_hosts_ = host_set_.hosts_; + host_set.hosts_.emplace_back(host); + host_set.healthy_hosts_ = host_set.hosts_; if (add_in_locality.valid()) { - host_set_.hosts_per_locality_[add_in_locality.value()].emplace_back(host); - host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; + host_set.hosts_per_locality_[add_in_locality.value()].emplace_back(host); + host_set.healthy_hosts_per_locality_ = host_set.hosts_per_locality_; } } @@ -236,8 +248,9 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { } if (GetParam() == REMOVES_FIRST && !remove.empty()) { - local_host_set_->updateHosts(local_hosts_, local_hosts_, local_hosts_per_locality_, - local_hosts_per_locality_, {}, remove); + local_priority_set_.getOrCreateHostSet(0).updateHosts(local_hosts_, local_hosts_, + local_hosts_per_locality_, + local_hosts_per_locality_, {}, remove); } for (const auto& host : add) { @@ -247,17 +260,20 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { if (GetParam() == REMOVES_FIRST) { if (!add.empty()) { - local_host_set_->updateHosts(local_hosts_, local_hosts_, local_hosts_per_locality_, - local_hosts_per_locality_, add, {}); + local_priority_set_.getOrCreateHostSet(0).updateHosts(local_hosts_, local_hosts_, + local_hosts_per_locality_, + local_hosts_per_locality_, add, {}); } } else if (!add.empty() || !remove.empty()) { - local_host_set_->updateHosts(local_hosts_, local_hosts_, local_hosts_per_locality_, - local_hosts_per_locality_, add, remove); + local_priority_set_.getOrCreateHostSet(0).updateHosts(local_hosts_, local_hosts_, + local_hosts_per_locality_, + local_hosts_per_locality_, add, remove); } } LoadBalancerType lb_type_{LoadBalancerType::RoundRobin}; - NiceMock host_set_; + NiceMock priority_set_; + MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); NiceMock subset_info_; std::shared_ptr info_{new NiceMock()}; envoy::api::v2::Cluster::RingHashLbConfig ring_hash_lb_config_; @@ -265,10 +281,10 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { NiceMock random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; - std::shared_ptr local_host_set_; + PrioritySetImpl local_priority_set_; HostVectorSharedPtr local_hosts_; HostListsSharedPtr local_hosts_per_locality_; - std::shared_ptr lb_; + std::shared_ptr lb_; }; TEST_F(SubsetLoadBalancerTest, NoFallback) { @@ -449,6 +465,33 @@ TEST_P(SubsetLoadBalancerTest, BalancesSubsetAfterUpdate) { EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); } +// Test that adding backends to a failover group causes no problems. 
+TEST_P(SubsetLoadBalancerTest, UpdateFailover) { + EXPECT_CALL(subset_info_, fallbackPolicy()) + .WillRepeatedly(Return(envoy::api::v2::Cluster::LbSubsetConfig::NO_FALLBACK)); + + std::vector> subset_keys = {{"version"}}; + EXPECT_CALL(subset_info_, subsetKeys()).WillRepeatedly(ReturnRef(subset_keys)); + TestLoadBalancerContext context_10({{"version", "1.0"}}); + + // Start with an empty lb. Choosing a host should result in failure. + init({}); + EXPECT_TRUE(nullptr == lb_->chooseHost(&context_10).get()); + + // Add hosts to the group at priority 1. As no load balancer will select from + // failovers yet, this still results in being unable to choose a host. + modifyHosts({makeHost("tcp://127.0.0.1:8000", {{"version", "1.2"}}), + makeHost("tcp://127.0.0.1:8001", {{"version", "1.0"}})}, + {}, {}, 1); + EXPECT_TRUE(nullptr == lb_->chooseHost(&context_10).get()); + + // Finally update the priority 0 hosts. The LB should now select hosts. + modifyHosts({makeHost("tcp://127.0.0.1:8000", {{"version", "1.2"}}), + makeHost("tcp://127.0.0.1:8001", {{"version", "1.0"}})}, + {}, {}, 0); + EXPECT_FALSE(nullptr == lb_->chooseHost(&context_10).get()); +} + TEST_P(SubsetLoadBalancerTest, UpdateRemovingLastSubsetHost) { EXPECT_CALL(subset_info_, fallbackPolicy()) .WillRepeatedly(Return(envoy::api::v2::Cluster::LbSubsetConfig::ANY_ENDPOINT)); @@ -639,7 +682,7 @@ TEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) { host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; - lb_.reset(new SubsetLoadBalancer(lb_type_, host_set_, nullptr, stats_, runtime_, random_, + lb_.reset(new SubsetLoadBalancer(lb_type_, priority_set_, nullptr, stats_, runtime_, random_, subset_info_, ring_hash_lb_config_)); TestLoadBalancerContext context_version({{"version", "1.0"}}); diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 2fe9ea1191e5..de6ca5ebddd6 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -659,6 +659,7 @@ TEST(StaticClusterImplTest, SourceAddressPriority) { // Test creating and extending a priority set. TEST(PrioritySet, Extend) { PrioritySetImpl priority_set; + priority_set.getOrCreateHostSet(0); uint32_t changes = 0; uint32_t last_priority = 0;