From 47e2838255108660cc36f2e8e3bd90e331bcd747 Mon Sep 17 00:00:00 2001 From: Leonardo da Mata Date: Thu, 26 Oct 2023 14:53:55 +0100 Subject: [PATCH 1/8] Add FULL_SCAN mode to least request load balancer By default, the least request load balancer returns the host with the fewest active requests from a set of N randomly selected hosts. This introduces a new "full scan" selection method that returns the host with the fewest number of active requests from all hosts. If multiple hosts are tied for "least", a host is selected using an approximately random means that is time and memory efficient. The selection probability among tied hosts may not be equal, depending on: - The host count - How many hosts are tied - How those tied hosts are distributed within the hosts array Signed-off-by: Jared Kirschner Signed-off-by: Leonardo da Mata --- .../least_request/v3/least_request.proto | 20 +++- changelogs/current.yaml | 7 ++ source/common/upstream/load_balancer_impl.cc | 85 ++++++++++++-- source/common/upstream/load_balancer_impl.h | 22 +++- .../upstream/load_balancer_impl_test.cc | 109 ++++++++++++++++++ 5 files changed, 227 insertions(+), 16 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index ebef61852e21..11f9e15fde12 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -22,10 +22,20 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy // extension point. See the :ref:`load balancing architecture overview // ` for more information. -// [#next-free-field: 6] message LeastRequest { + // Available methods for selecting the host set from which to return the host with the + // fewest active requests. + enum SelectionMethod { + // Return host with fewest requests from a set of ``choice_count`` randomly selected hosts + N_CHOICES = 0; + + // Return host with fewest requests from all hosts + FULL_SCAN = 1; + } + // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + // Only applies to the ``N_CHOICES`` selection method. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; // The following formula is used to calculate the dynamic weights when hosts have different load @@ -61,8 +71,10 @@ message LeastRequest { common.v3.LocalityLbConfig locality_lb_config = 4; // [#not-implemented-hide:] - // Configuration for performing full scan on the list of hosts. - // If this configuration is set, when selecting the host a full scan on the list hosts will be - // used to select the one with least requests instead of using random choices. + // Unused. Replaced by the `selection_method` enum for better extensibility. google.protobuf.BoolValue enable_full_scan = 5; + + // Method for selecting the host set from which to return the host with the fewest active requests. + // Defaults to ``N_CHOICES``. 
+  SelectionMethod selection_method = 6 [(validate.rules).enum = {defined_only: true}]; }
diff --git a/changelogs/current.yaml b/changelogs/current.yaml index 9ecf0d6e48ce..3ea6bc61c4a5 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -13,5 +13,12 @@ removed_config_or_runtime: # *Normally occurs at the end of the* :ref:`deprecation period ` new_features: +- area: upstream + change: | + Added :ref:`selection_method ` + option to the least request load balancer. If set to ``FULL_SCAN``, + Envoy will select the host with the fewest active requests from the entire host set rather than + :ref:`choice_count ` + random choices. deprecated:
diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index c85565bfc6fc..05de23f2ba67 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1299,25 +1299,88 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector const HostsSource&) { HostSharedPtr candidate_host = nullptr; + switch (selection_method_) { + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN: + candidate_host = unweightedHostPickFullScan(hosts_to_use); + break; + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_N_CHOICES: + default: + candidate_host = unweightedHostPickNChoices(hosts_to_use); + } + + return candidate_host; +} + +HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickFullScan(const HostVector& hosts_to_use) { + HostSharedPtr candidate_host = nullptr; + + // "Randomly" scan the host array using a linear congruential generator with the following parameters: + // - The "modulus" is the number of hosts + // - The "multiplier" is 1 + // - The "increment" is a non-zero value less than the modulus. + // It is guaranteed non-zero because it is determined by: + // random large prime number % modulus ... which can never be 0 + // - The "start value" is a random index in the host array + // + // With these parameters, the generator has a period equal to the modulus (number of hosts). + // + // The effect of this algorithm is to visit each index of the host array exactly once. + // The next index will always be +"increment" indices from the current index. + // + // This algorithm is not *very* random, but it is random *enough* for its purpose. + // Approaches with more randomness were not used because they are less performant for larger host arrays + // (e.g., requiring the allocation of an array the length of the host array). + + + // Because the initial value of host_scan_index will always be the last index scanned, + // it must be a random index. + unsigned long host_scan_index = random_.random() % hosts_to_use.size(); + unsigned long num_hosts = hosts_to_use.size(); + + // Choose a random large prime greater than the host array length to ensure the "increment" + // can be any value between 0 and the array length that is coprime with the array length. + // By using a random large prime in each scan, the "increment" can differ between scans, + // and therefore the sequences of indices visited can differ between scans. This behavior is + // important to avoid selecting the exact same host every time if multiple hosts tie for + // least requests. 
+ unsigned long large_prime = LARGE_PRIME_CHOICES[random_.random() % LARGE_PRIME_CHOICES_LEN]; + + for (unsigned long i = 0; i < num_hosts; ++i) { + host_scan_index = (host_scan_index + large_prime) % num_hosts; + const HostSharedPtr& sampled_host = hosts_to_use[host_scan_index]; + + candidate_host = pickLeastFromCandidateAndNewSample(candidate_host, sampled_host); + } + + return candidate_host; +} + +HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickNChoices(const HostVector& hosts_to_use) { + HostSharedPtr candidate_host = nullptr; + for (uint32_t choice_idx = 0; choice_idx < choice_count_; ++choice_idx) { const int rand_idx = random_.random() % hosts_to_use.size(); const HostSharedPtr& sampled_host = hosts_to_use[rand_idx]; - if (candidate_host == nullptr) { + candidate_host = pickLeastFromCandidateAndNewSample(candidate_host, sampled_host); + } - // Make a first choice to start the comparisons. - candidate_host = sampled_host; - continue; - } + return candidate_host; +} - const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); - const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); - if (sampled_active_rq < candidate_active_rq) { - candidate_host = sampled_host; - } +HostSharedPtr LeastRequestLoadBalancer::pickLeastFromCandidateAndNewSample(HostSharedPtr candidate_host, + HostSharedPtr sampled_host) { + if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. + return sampled_host; } - return candidate_host; + const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); + const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); + + return (sampled_active_rq < candidate_active_rq) ? + sampled_host : + candidate_host; } HostConstSharedPtr RandomLoadBalancer::peekAnotherHost(LoadBalancerContext* context) { diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 614541057798..92c3624a4575 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -710,10 +710,25 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { least_request_config.has_active_request_bias() ? absl::optional( {least_request_config.active_request_bias(), runtime}) - : absl::nullopt) { + : absl::nullopt), + selection_method_(least_request_config.selection_method()) { initialize(); } + // 40 prime numbers that are well above the largest number of hosts we'd expect. + // Taken from https://www.bigprimes.net/archive/prime/1000000. 
+ constexpr static unsigned long LARGE_PRIME_CHOICES[] = { + 15485863, 15485867, 15485917, 15485927, 15485933, + 15485941, 15485959, 15485989, 15485993, 15486013, + 15486041, 15486047, 15486059, 15486071, 15486101, + 15486139, 15486157, 15486173, 15486181, 15486193, + 15486209, 15486221, 15486227, 15486241, 15486257, + 15486259, 15486277, 15486281, 15486283, 15486287, + 15486347, 15486421, 15486433, 15486437, 15486451, + 15486469, 15486481, 15486487, 15486491, 15486511 + }; + constexpr static unsigned long LARGE_PRIME_CHOICES_LEN = std::size(LARGE_PRIME_CHOICES); + protected: void refresh(uint32_t priority) override { active_request_bias_ = active_request_bias_runtime_ != absl::nullopt @@ -737,6 +752,10 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { const HostsSource& source) override; HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) override; + HostSharedPtr unweightedHostPickFullScan(const HostVector& hosts_to_use); + HostSharedPtr unweightedHostPickNChoices(const HostVector& hosts_to_use); + HostSharedPtr pickLeastFromCandidateAndNewSample(HostSharedPtr candidate_host, + HostSharedPtr sampled_host); const uint32_t choice_count_; @@ -746,6 +765,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { double active_request_bias_{}; const absl::optional active_request_bias_runtime_; + const envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod selection_method_{}; }; /** diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 518f2a4de1d0..5cbf241509e1 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2880,6 +2880,115 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { EXPECT_EQ(hostSet().healthy_hosts_[3], lb_5.chooseHost(nullptr)); } +TEST_P(LeastRequestLoadBalancerTest, DefaultSelectionMethod) { + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; + EXPECT_EQ(lr_lb_config.selection_method(), + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_N_CHOICES); +} + +TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(4); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[2]->stats().rq_active_.set(2); + hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[4]->stats().rq_active_.set(5); + + // Creating various load balancer objects with different choice configs. + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; + lr_lb_config.mutable_choice_count()->set_value(2); + // Enable full table scan on hosts. 
+ lr_lb_config.set_selection_method( + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN); + common_config_.mutable_healthy_panic_threshold()->set_value(0); + + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(3); + LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(4); + LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + lr_lb_config.mutable_choice_count()->set_value(6); + LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + + // With full scan we will always choose the host with least number of active requests. + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_4.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb_6.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())}; + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. + + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[2]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[3]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[4]->stats().rq_active_.set(3); + + // Creating various load balancer objects with different choice configs. + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; + lr_lb_config.mutable_choice_count()->set_value(2); + // Enable full table scan on hosts. + lr_lb_config.set_selection_method( + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN); + common_config_.mutable_healthy_panic_threshold()->set_value(0); + + LeastRequestLoadBalancer lb{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + + // When multiple hosts have the least number of requests, + // full scan mode picks the first such host that it encounters in its pseudo-random scan order. + // The test expectations here are hard-coded assuming there are 5 hosts and the available prime numbers + // used in the pseudo-random scan algorithm have not been modified. Check those assumptions. + EXPECT_EQ(LeastRequestLoadBalancer::LARGE_PRIME_CHOICES[0], 15485863); + EXPECT_EQ(LeastRequestLoadBalancer::LARGE_PRIME_CHOICES[1], 15485867); + EXPECT_EQ(hostSet().healthy_hosts_.size(), 5); + + // large_prime_array_index is 0, which will select 15485863 from the large prime choices array. + // That causes the increment to be 15485863 % 5 = 3. Therefore, starting with + // index_before_first_increment (0) before the first increment, the sequence of indices scanned is: + // [3, 1, 4, 2, 0] + // Therefore, healthy host 1 should be selected. 
+ unsigned long index_before_first_increment = 0; + unsigned long large_prime_array_index = 0; + EXPECT_CALL(random_, random()) + .WillOnce(Return(0)). + WillOnce(Return(index_before_first_increment)). + WillOnce(Return(large_prime_array_index)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb.chooseHost(nullptr)); + + // large_prime_array_index is 1, which will select 15485867 from the large prime choices array. + // That causes the increment to be 15485867 % 5 = 2. Therefore, starting with + // index_before_first_increment (3) before the first increment, the sequence of indices scanned is: + // [0, 2, 4, 1, 3] + // Therefore, healthy host 0 should be selected. + index_before_first_increment = 3; + large_prime_array_index = 1; + EXPECT_CALL(random_, random()) + .WillOnce(Return(0)). + WillOnce(Return(index_before_first_increment)). + WillOnce(Return(large_prime_array_index)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb.chooseHost(nullptr)); +} + TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; From 314cf87575a08f20c085c70d834145879d6f9013 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Tue, 23 Jan 2024 22:37:12 -0800 Subject: [PATCH 2/8] Remove FULL_SCAN mode tie-breaking bias Previously, the "full scan" selection method could have uneven selection probabilities among hosts tied for least requests. Now, the selection probability is equal among tied hosts. The implementation remains memory efficient (no dynamic memory allocation) by using the reservoir sampling technique to randomly select 1 of the tied hosts. Signed-off-by: Jared Kirschner --- .../least_request/v3/least_request.proto | 1 + source/common/upstream/load_balancer_impl.cc | 98 +++++++++---------- source/common/upstream/load_balancer_impl.h | 18 +--- .../upstream/load_balancer_impl_test.cc | 80 +++++++-------- 4 files changed, 91 insertions(+), 106 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 11f9e15fde12..8c78b69bd941 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -75,6 +75,7 @@ message LeastRequest { google.protobuf.BoolValue enable_full_scan = 5; // Method for selecting the host set from which to return the host with the fewest active requests. + // // Defaults to ``N_CHOICES``. 
SelectionMethod selection_method = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 05de23f2ba67..9f061ccfcc9d 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1300,12 +1300,14 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector HostSharedPtr candidate_host = nullptr; switch (selection_method_) { - case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN: + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN: candidate_host = unweightedHostPickFullScan(hosts_to_use); break; - case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_N_CHOICES: - default: + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::N_CHOICES: candidate_host = unweightedHostPickNChoices(hosts_to_use); + break; + default: + IS_ENVOY_BUG("unknown selection method specified for least request load balancer"); } return candidate_host; @@ -1314,42 +1316,42 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickFullScan(const HostVector& hosts_to_use) { HostSharedPtr candidate_host = nullptr; - // "Randomly" scan the host array using a linear congruential generator with the following parameters: - // - The "modulus" is the number of hosts - // - The "multiplier" is 1 - // - The "increment" is a non-zero value less than the modulus. - // It is guaranteed non-zero because it is determined by: - // random large prime number / modulus ... which never be 0 - // - The "start value" is a random index in the host array - // - // With these parameters, the generator has a period equal to the modulus (number of hosts). - // - // The effect of this algorithm is to visit each index of the host array exactly once. - // The next index will always be +"increment" indices from the current index. - // - // This algorithm is not *very* random, but it is random *enough* for its purpose. - // Approaches with more randomness were not used because they are less performant for larger host arrays - // (e.g., requiring the allocation of an array the length of the host array). - + uint32_t num_hosts_known_tied_for_least = 0; - // Because the initial value of host_scan_index will always be the last index scanned, - // it must be a random index. - unsigned long host_scan_index = random_.random() % hosts_to_use.size(); - unsigned long num_hosts = hosts_to_use.size(); + const uint32_t num_hosts = hosts_to_use.size(); - // Choose a random large prime greater than the host array length to ensure the "increment" - // can be any value between 0 and the array length that is coprime with the array length. - // By using a random large prime in each scan, the "increment" can differ between scans, - // and therefore the sequences of indices visited can differ between scans. This behavior is - // important to avoid selecting the exact same host every time if multiple hosts tie for - // least requests. 
- unsigned long large_prime = LARGE_PRIME_CHOICES[random_.random() % LARGE_PRIME_CHOICES_LEN]; + for (uint32_t i = 0; i < num_hosts; ++i) { + const HostSharedPtr& sampled_host = hosts_to_use[i]; - for (unsigned long i = 0; i < num_hosts; ++i) { - host_scan_index = (host_scan_index + large_prime) % num_hosts; - const HostSharedPtr& sampled_host = hosts_to_use[host_scan_index]; + if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. + num_hosts_known_tied_for_least = 1; + candidate_host = sampled_host; + continue; + } - candidate_host = pickLeastFromCandidateAndNewSample(candidate_host, sampled_host); + const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); + const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); + + if (sampled_active_rq < candidate_active_rq) { + // Reset the count of known tied hosts. + num_hosts_known_tied_for_least = 1; + candidate_host = sampled_host; + } else if (sampled_active_rq == candidate_active_rq) { + ++num_hosts_known_tied_for_least; + + // Use reservoir sampling to select 1 unique sample from the total number of hosts N + // that will tie for least requests after processing the full hosts array. + // + // Upon each new tie encountered, replace candidate_host with sampled_host + // 1 / num_hosts_known_tied_for_least percent of the time. + // The end result is that each tied host has an equal 1 / N chance of being the + // candidate_host returned by this function. + uint32_t random_tied_host_index = random_.random() % num_hosts_known_tied_for_least; + if (random_tied_host_index == 0) { + candidate_host = sampled_host; + } + } } return candidate_host; @@ -1362,25 +1364,21 @@ HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickNChoices(const HostVec const int rand_idx = random_.random() % hosts_to_use.size(); const HostSharedPtr& sampled_host = hosts_to_use[rand_idx]; - candidate_host = pickLeastFromCandidateAndNewSample(candidate_host, sampled_host); - } + if (candidate_host == nullptr) { + // Make a first choice to start the comparisons. + candidate_host = sampled_host; + continue; + } - return candidate_host; -} + const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); + const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); -HostSharedPtr LeastRequestLoadBalancer::pickLeastFromCandidateAndNewSample(HostSharedPtr candidate_host, - HostSharedPtr sampled_host) { - if (candidate_host == nullptr) { - // Make a first choice to start the comparisons. - return sampled_host; + if (sampled_active_rq < candidate_active_rq) { + candidate_host = sampled_host; + } } - const auto candidate_active_rq = candidate_host->stats().rq_active_.value(); - const auto sampled_active_rq = sampled_host->stats().rq_active_.value(); - - return (sampled_active_rq < candidate_active_rq) ? - sampled_host : - candidate_host; + return candidate_host; } HostConstSharedPtr RandomLoadBalancer::peekAnotherHost(LoadBalancerContext* context) { diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 92c3624a4575..ca8ba404389c 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -715,20 +715,6 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { initialize(); } - // 40 prime numbers that are well above the largest number of hosts we'd expect. - // Taken from https://www.bigprimes.net/archive/prime/1000000. 
- constexpr static unsigned long LARGE_PRIME_CHOICES[] = { - 15485863, 15485867, 15485917, 15485927, 15485933, - 15485941, 15485959, 15485989, 15485993, 15486013, - 15486041, 15486047, 15486059, 15486071, 15486101, - 15486139, 15486157, 15486173, 15486181, 15486193, - 15486209, 15486221, 15486227, 15486241, 15486257, - 15486259, 15486277, 15486281, 15486283, 15486287, - 15486347, 15486421, 15486433, 15486437, 15486451, - 15486469, 15486481, 15486487, 15486491, 15486511 - }; - constexpr static unsigned long LARGE_PRIME_CHOICES_LEN = std::size(LARGE_PRIME_CHOICES); - protected: void refresh(uint32_t priority) override { active_request_bias_ = active_request_bias_runtime_ != absl::nullopt @@ -754,8 +740,6 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { const HostsSource& source) override; HostSharedPtr unweightedHostPickFullScan(const HostVector& hosts_to_use); HostSharedPtr unweightedHostPickNChoices(const HostVector& hosts_to_use); - HostSharedPtr pickLeastFromCandidateAndNewSample(HostSharedPtr candidate_host, - HostSharedPtr sampled_host); const uint32_t choice_count_; @@ -765,7 +749,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { double active_request_bias_{}; const absl::optional active_request_bias_runtime_; - const envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod selection_method_{}; + const envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::SelectionMethod selection_method_{}; }; /** diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 5cbf241509e1..b5d2c94b46cd 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2883,7 +2883,7 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { TEST_P(LeastRequestLoadBalancerTest, DefaultSelectionMethod) { envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; EXPECT_EQ(lr_lb_config.selection_method(), - envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_N_CHOICES); + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::N_CHOICES); } TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { @@ -2906,7 +2906,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { lr_lb_config.mutable_choice_count()->set_value(2); // Enable full table scan on hosts. lr_lb_config.set_selection_method( - envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN); + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, @@ -2937,56 +2937,58 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. 
- hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); - hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); - hostSet().healthy_hosts_[2]->stats().rq_active_.set(3); - hostSet().healthy_hosts_[3]->stats().rq_active_.set(3); - hostSet().healthy_hosts_[4]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[0]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(3); + hostSet().healthy_hosts_[2]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[4]->stats().rq_active_.set(1); // Creating various load balancer objects with different choice configs. envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(2); // Enable full table scan on hosts. lr_lb_config.set_selection_method( - envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest_SelectionMethod_FULL_SCAN); + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); LeastRequestLoadBalancer lb{priority_set_, nullptr, stats_, runtime_, random_, 1, lr_lb_config, simTime()}; - // When multiple hosts have the least number of requests, - // full scan mode picks the first such host that it encountered in its pseudo-random scan order. - // The test expects here are hard-coded assuming there are 5 hosts and the available prime numbers - // used in the psuedo-random scan algorithm have not been modified. Check those assumptions. - EXPECT_EQ(LeastRequestLoadBalancer::LARGE_PRIME_CHOICES[0], 15485863); - EXPECT_EQ(LeastRequestLoadBalancer::LARGE_PRIME_CHOICES[1], 15485867); - EXPECT_EQ(hostSet().healthy_hosts_.size(), 5); - - // large_prime_array_index is 0, which will select 15485863 from the large prime choices array. - // That causes the increment to be 15485863 % 5 = 3. Therefore, starting with - // index_before_first_increment (0) before the first increment, the sequence of indices scanned is: - // [3, 1, 4, 2, 0] - // Therefore, healthy host 1 should be selected. - unsigned long index_before_first_increment = 0; - unsigned long large_prime_array_index = 0; + // Indices 2-4 are tied for least. + // Random numbers are generated whenever a tie is encountered, which will occur at: + // - Index 1 (tied with index 0) + // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 + // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 3 + EXPECT_CALL(random_, random()) + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(998)) + .WillOnce(Return(4)); + EXPECT_EQ(hostSet().healthy_hosts_[3], lb.chooseHost(nullptr)); + + // Indices 2-4 are tied for least. + // Random numbers are generated whenever a tie is encountered, which will occur at: + // - Index 1 (tied with index 0) + // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 + // - Index 4 (tied with indices 2-3) -> use 6 so that 6 % 3 ties == 0 to select index 4 EXPECT_CALL(random_, random()) - .WillOnce(Return(0)). - WillOnce(Return(index_before_first_increment)). - WillOnce(Return(large_prime_array_index)); - EXPECT_EQ(hostSet().healthy_hosts_[1], lb.chooseHost(nullptr)); - - // large_prime_array_index is 1, which will select 15485867 from the large prime choices array. - // That causes the increment to be 15485867 % 5 = 2. 
Therefore, starting with - // index_before_first_increment (3) before the first increment, the sequence of indices scanned is: - // [0, 2, 4, 1, 3] - // Therefore, healthy host 0 should be selected. - index_before_first_increment = 3; - large_prime_array_index = 1; + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(998)) + .WillOnce(Return(6)); + EXPECT_EQ(hostSet().healthy_hosts_[4], lb.chooseHost(nullptr)); + + // Indices 2-4 are tied for least. + // Random numbers are generated whenever a tie is encountered, which will occur at: + // - Index 1 (tied with index 0) + // - Index 3 (tied with index 2) -> use 999 so that 998 % 2 ties != 0 to keep index 2 + // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 2 EXPECT_CALL(random_, random()) - .WillOnce(Return(0)). - WillOnce(Return(index_before_first_increment)). - WillOnce(Return(large_prime_array_index)); - EXPECT_EQ(hostSet().healthy_hosts_[0], lb.chooseHost(nullptr)); + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(999)) + .WillOnce(Return(4)); + EXPECT_EQ(hostSet().healthy_hosts_[2], lb.chooseHost(nullptr)); } TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { From b3ec95e398b83e3c9993052fe2055466fd960fd4 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 24 Jan 2024 15:05:21 -0800 Subject: [PATCH 3/8] Fix code formatting Signed-off-by: Jared Kirschner --- .../least_request/v3/least_request.proto | 1 + source/common/upstream/load_balancer_impl.cc | 14 ++++----- source/common/upstream/load_balancer_impl.h | 3 +- .../upstream/load_balancer_impl_test.cc | 30 +++++++++---------- 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 8c78b69bd941..2c235967db27 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -22,6 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy // extension point. See the :ref:`load balancing architecture overview // ` for more information. +// [#next-free-field: 7] message LeastRequest { // Available methods for selecting the host set from which to return the host with the // fewest active requests. 
diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 9f061ccfcc9d..2b754db3e65f 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1300,13 +1300,13 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector HostSharedPtr candidate_host = nullptr; switch (selection_method_) { - case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN: - candidate_host = unweightedHostPickFullScan(hosts_to_use); - break; - case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::N_CHOICES: - candidate_host = unweightedHostPickNChoices(hosts_to_use); - break; - default: + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN: + candidate_host = unweightedHostPickFullScan(hosts_to_use); + break; + case envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::N_CHOICES: + candidate_host = unweightedHostPickNChoices(hosts_to_use); + break; + default: IS_ENVOY_BUG("unknown selection method specified for least request load balancer"); } diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index ca8ba404389c..55927ed4a1f0 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -749,7 +749,8 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { double active_request_bias_{}; const absl::optional active_request_bias_runtime_; - const envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::SelectionMethod selection_method_{}; + const envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::SelectionMethod + selection_method_{}; }; /** diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index b5d2c94b46cd..b1426b7624d0 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2882,7 +2882,7 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { TEST_P(LeastRequestLoadBalancerTest, DefaultSelectionMethod) { envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; - EXPECT_EQ(lr_lb_config.selection_method(), + EXPECT_EQ(lr_lb_config.selection_method(), envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::N_CHOICES); } @@ -2906,7 +2906,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { lr_lb_config.mutable_choice_count()->set_value(2); // Enable full table scan on hosts. lr_lb_config.set_selection_method( - envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, @@ -2948,7 +2948,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { lr_lb_config.mutable_choice_count()->set_value(2); // Enable full table scan on hosts. 
lr_lb_config.set_selection_method( - envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); + envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); LeastRequestLoadBalancer lb{priority_set_, nullptr, stats_, runtime_, @@ -2960,10 +2960,10 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 3 EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(998)) - .WillOnce(Return(4)); + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(998)) + .WillOnce(Return(4)); EXPECT_EQ(hostSet().healthy_hosts_[3], lb.chooseHost(nullptr)); // Indices 2-4 are tied for least. @@ -2972,10 +2972,10 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 // - Index 4 (tied with indices 2-3) -> use 6 so that 6 % 3 ties == 0 to select index 4 EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(998)) - .WillOnce(Return(6)); + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(998)) + .WillOnce(Return(6)); EXPECT_EQ(hostSet().healthy_hosts_[4], lb.chooseHost(nullptr)); // Indices 2-4 are tied for least. @@ -2984,10 +2984,10 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { // - Index 3 (tied with index 2) -> use 999 so that 998 % 2 ties != 0 to keep index 2 // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 2 EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(999)) - .WillOnce(Return(4)); + .WillOnce(Return(0)) + .WillOnce(Return(0)) + .WillOnce(Return(999)) + .WillOnce(Return(4)); EXPECT_EQ(hostSet().healthy_hosts_[2], lb.chooseHost(nullptr)); } From 549bd854189429652d25fb07f5417179d840a5e1 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 24 Jan 2024 16:05:24 -0800 Subject: [PATCH 4/8] Address PR review feedback Signed-off-by: Jared Kirschner --- .../least_request/v3/least_request.proto | 2 +- source/common/upstream/load_balancer_impl.cc | 10 +-- test/common/upstream/BUILD | 1 + .../upstream/load_balancer_impl_test.cc | 71 +++++++++---------- 4 files changed, 41 insertions(+), 43 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 2c235967db27..670d23169d3f 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -73,7 +73,7 @@ message LeastRequest { // [#not-implemented-hide:] // Unused. Replaced by the `selection_method` enum for better extensibility. - google.protobuf.BoolValue enable_full_scan = 5; + google.protobuf.BoolValue enable_full_scan = 5 [deprecated = true]; // Method for selecting the host set from which to return the host with the fewest active requests. 
// 
diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 2b754db3e65f..c7f717d61a3b 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1316,11 +1316,11 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickFullScan(const HostVector& hosts_to_use) { HostSharedPtr candidate_host = nullptr; - uint32_t num_hosts_known_tied_for_least = 0; + size_t num_hosts_known_tied_for_least = 0; - const uint32_t num_hosts = hosts_to_use.size(); + const size_t num_hosts = hosts_to_use.size(); - for (uint32_t i = 0; i < num_hosts; ++i) { + for (size_t i = 0; i < num_hosts; ++i) { const HostSharedPtr& sampled_host = hosts_to_use[i]; if (candidate_host == nullptr) { @@ -1344,10 +1344,10 @@ HostSharedPtr LeastRequestLoadBalancer::unweightedHostPickFullScan(const HostVec // that will tie for least requests after processing the full hosts array. // // Upon each new tie encountered, replace candidate_host with sampled_host - // 1 / num_hosts_known_tied_for_least percent of the time. + // with probability (1 / num_hosts_known_tied_for_least). // The end result is that each tied host has an equal 1 / N chance of being the // candidate_host returned by this function. - uint32_t random_tied_host_index = random_.random() % num_hosts_known_tied_for_least; + const size_t random_tied_host_index = random_.random() % num_hosts_known_tied_for_least; if (random_tied_host_index == 0) { candidate_host = sampled_host; }
diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 12fe0396553a..62f2760f79ee 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -265,6 +265,7 @@ envoy_cc_test( srcs = ["load_balancer_impl_test.cc"], deps = [ ":utility_lib", + "//source/common/common:random_generator_lib", "//source/common/network:utility_lib", "//source/common/upstream:load_balancer_lib", "//source/common/upstream:upstream_includes",
diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index b1426b7624d0..6607d6dd49ab 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -5,12 +5,14 @@ #include #include #include +#include #include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/health_check.pb.h" +#include "source/common/common/random_generator.h" #include "source/common/network/utility.h" #include "source/common/upstream/load_balancer_impl.h" #include "source/common/upstream/upstream_impl.h" @@ -2904,7 +2906,7 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { // Creating various load balancer objects with different choice configs. envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; lr_lb_config.mutable_choice_count()->set_value(2); - // Enable full table scan on hosts. + // Enable FULL_SCAN on hosts. 
lr_lb_config.set_selection_method( envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); @@ -2951,44 +2953,39 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); common_config_.mutable_healthy_panic_threshold()->set_value(0); + auto random = Random::RandomGeneratorImpl(); + LeastRequestLoadBalancer lb{priority_set_, nullptr, stats_, runtime_, - random_, 1, lr_lb_config, simTime()}; + random, 1, lr_lb_config, simTime()}; + + // Make 1 million selections. Then, check that the selection probability is + // approximately equal among the 3 hosts tied for least requests. + // Accept a +/-0.5% deviation from the expected selection probability (33.3..%). + size_t num_selections = 1000000; + size_t expected_approx_selections_per_tied_host = num_selections / 3; + size_t abs_error = 5000; + + size_t host_2_counts = 0; + size_t host_3_counts = 0; + size_t host_4_counts = 0; + + for (size_t i = 0; i < num_selections; ++i) { + auto selected_host = lb.chooseHost(nullptr); + + if (selected_host == hostSet().healthy_hosts_[2]) { + ++host_2_counts; + } else if (selected_host == hostSet().healthy_hosts_[3]) { + ++host_3_counts; + } else if (selected_host == hostSet().healthy_hosts_[4]) { + ++host_4_counts; + } else { + FAIL() << "Must only select hosts with least requests"; + } + } - // Indices 2-4 are tied for least. - // Random numbers are generated whenever a tie is encountered, which will occur at: - // - Index 1 (tied with index 0) - // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 - // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 3 - EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(998)) - .WillOnce(Return(4)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb.chooseHost(nullptr)); - - // Indices 2-4 are tied for least. - // Random numbers are generated whenever a tie is encountered, which will occur at: - // - Index 1 (tied with index 0) - // - Index 3 (tied with index 2) -> use 998 so that 998 % 2 ties == 0 to select index 3 - // - Index 4 (tied with indices 2-3) -> use 6 so that 6 % 3 ties == 0 to select index 4 - EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(998)) - .WillOnce(Return(6)); - EXPECT_EQ(hostSet().healthy_hosts_[4], lb.chooseHost(nullptr)); - - // Indices 2-4 are tied for least. 
- // Random numbers are generated whenever a tie is encountered, which will occur at: - // - Index 1 (tied with index 0) - // - Index 3 (tied with index 2) -> use 999 so that 998 % 2 ties != 0 to keep index 2 - // - Index 4 (tied with indices 2-3) -> use 4 so that 4 % 3 ties != 0 to keep index 2 - EXPECT_CALL(random_, random()) - .WillOnce(Return(0)) - .WillOnce(Return(0)) - .WillOnce(Return(999)) - .WillOnce(Return(4)); - EXPECT_EQ(hostSet().healthy_hosts_[2], lb.chooseHost(nullptr)); + EXPECT_NEAR(expected_approx_selections_per_tied_host, host_2_counts, abs_error); + EXPECT_NEAR(expected_approx_selections_per_tied_host, host_3_counts, abs_error); + EXPECT_NEAR(expected_approx_selections_per_tied_host, host_4_counts, abs_error); } TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { From 6b362f4016058c1483a2e2e72ebba72a56d8bd12 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 24 Jan 2024 16:12:19 -0800 Subject: [PATCH 5/8] Remove outdated test cruft Signed-off-by: Jared Kirschner --- .../upstream/load_balancer_impl_test.cc | 32 +++++-------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 6607d6dd49ab..4daaf19cee0d 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -2903,31 +2903,17 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanOneHostWithLeastRequests) { hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); hostSet().healthy_hosts_[4]->stats().rq_active_.set(5); - // Creating various load balancer objects with different choice configs. envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; - lr_lb_config.mutable_choice_count()->set_value(2); + // Enable FULL_SCAN on hosts. lr_lb_config.set_selection_method( envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); - common_config_.mutable_healthy_panic_threshold()->set_value(0); - LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, - random_, 1, lr_lb_config, simTime()}; - lr_lb_config.mutable_choice_count()->set_value(3); - LeastRequestLoadBalancer lb_3{priority_set_, nullptr, stats_, runtime_, - random_, 1, lr_lb_config, simTime()}; - lr_lb_config.mutable_choice_count()->set_value(4); - LeastRequestLoadBalancer lb_4{priority_set_, nullptr, stats_, runtime_, - random_, 1, lr_lb_config, simTime()}; - lr_lb_config.mutable_choice_count()->set_value(6); - LeastRequestLoadBalancer lb_6{priority_set_, nullptr, stats_, runtime_, - random_, 1, lr_lb_config, simTime()}; - - // With full scan we will always choose the host with least number of active requests. - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_2.chooseHost(nullptr)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_3.chooseHost(nullptr)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_4.chooseHost(nullptr)); - EXPECT_EQ(hostSet().healthy_hosts_[3], lb_6.chooseHost(nullptr)); + LeastRequestLoadBalancer lb{priority_set_, nullptr, stats_, runtime_, + random_, 1, lr_lb_config, simTime()}; + + // With FULL_SCAN we will always choose the host with least number of active requests. 
+ EXPECT_EQ(hostSet().healthy_hosts_[3], lb.chooseHost(nullptr)); } TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { @@ -2945,13 +2931,11 @@ TEST_P(LeastRequestLoadBalancerTest, FullScanMultipleHostsWithLeastRequests) { hostSet().healthy_hosts_[3]->stats().rq_active_.set(1); hostSet().healthy_hosts_[4]->stats().rq_active_.set(1); - // Creating various load balancer objects with different choice configs. envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest lr_lb_config; - lr_lb_config.mutable_choice_count()->set_value(2); - // Enable full table scan on hosts. + + // Enable FULL_SCAN on hosts. lr_lb_config.set_selection_method( envoy::extensions::load_balancing_policies::least_request::v3::LeastRequest::FULL_SCAN); - common_config_.mutable_healthy_panic_threshold()->set_value(0); auto random = Random::RandomGeneratorImpl(); From 79b04c039b53ca33c493af2bade24c8182ee7e7f Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 24 Jan 2024 16:29:18 -0800 Subject: [PATCH 6/8] Fix code formatting Signed-off-by: Jared Kirschner --- .../extensions/load_balancing_policies/least_request/v3/BUILD | 1 + .../least_request/v3/least_request.proto | 4 +++- source/common/upstream/load_balancer_impl.cc | 2 +- test/common/upstream/load_balancer_impl_test.cc | 1 - 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/BUILD b/api/envoy/extensions/load_balancing_policies/least_request/v3/BUILD index b45c78410e7d..1ba2da7dfdc2 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/BUILD +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", "//envoy/extensions/load_balancing_policies/common/v3:pkg", "@com_github_cncf_xds//udpa/annotations:pkg", diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 670d23169d3f..68348ec1a35c 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -7,6 +7,7 @@ import "envoy/extensions/load_balancing_policies/common/v3/common.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -73,7 +74,8 @@ message LeastRequest { // [#not-implemented-hide:] // Unused. Replaced by the `selection_method` enum for better extensibility. - google.protobuf.BoolValue enable_full_scan = 5 [deprecated = true]; + google.protobuf.BoolValue enable_full_scan = 5 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Method for selecting the host set from which to return the host with the fewest active requests. 
// diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index c7f717d61a3b..cdbe44a01fad 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -1307,7 +1307,7 @@ HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPick(const HostVector candidate_host = unweightedHostPickNChoices(hosts_to_use); break; default: - IS_ENVOY_BUG("unknown selection method specified for least request load balancer"); + IS_ENVOY_BUG("unknown selection method specified for least request load balancer"); } return candidate_host; diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 4daaf19cee0d..2dcf96322408 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/config/cluster/v3/cluster.pb.h" From ca5cf25c1afd6da9d840aec596c5c640e021f057 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 24 Jan 2024 16:42:38 -0800 Subject: [PATCH 7/8] Add word to spell check dictionary Signed-off-by: Jared Kirschner --- tools/spelling/spelling_dictionary.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index d7f46a9d8c08..9de34d2bdb5d 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -747,6 +747,7 @@ exe execlp exprfor expectable +extensibility extrahelp faceplant facto From 53ebe906aee39a3f03dcf6b1b7076edfc5be7304 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 26 Jan 2024 09:33:15 -0800 Subject: [PATCH 8/8] Clarify FULL_SCAN method use cases Signed-off-by: Jared Kirschner --- .../least_request/v3/least_request.proto | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto index 68348ec1a35c..095f60752869 100644 --- a/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto +++ b/api/envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto @@ -28,10 +28,23 @@ message LeastRequest { // Available methods for selecting the host set from which to return the host with the // fewest active requests. enum SelectionMethod { - // Return host with fewest requests from a set of ``choice_count`` randomly selected hosts + // Return host with fewest requests from a set of ``choice_count`` randomly selected hosts. + // Best selection method for most scenarios. N_CHOICES = 0; - // Return host with fewest requests from all hosts + // Return host with fewest requests from all hosts. + // Useful in some niche use cases involving low request rates and one of: + // (example 1) low request limits on workloads, or (example 2) few hosts. + // + // Example 1: Consider a workload type that can only accept one connection at a time. + // If such workloads are deployed across many hosts, only a small percentage of those + // workloads have zero connections at any given time, and the rate of new connections is low, + // the ``FULL_SCAN`` method is more likely to select a suitable host than ``N_CHOICES``. + // + // Example 2: Consider a workload type that is only deployed on 2 hosts. 
With default settings, + // the ``N_CHOICES`` method will return the host with more active requests 25% of the time. + // If the request rate is sufficiently low, the behavior of always selecting the host with least + // requests as of the last metrics refresh may be preferable. FULL_SCAN = 1; }
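
For readers who want to experiment with the tie-breaking behavior outside of Envoy, the following is a minimal standalone C++ sketch of the single-pass reservoir-sampling selection introduced in patch 2. It is not Envoy code: hosts are modeled as plain active-request counters and std::mt19937_64 stands in for Envoy's RandomGenerator, but the candidate/tie-count logic mirrors unweightedHostPickFullScan. The main() repeats the selection one million times on the {3, 3, 1, 1, 1} request counts used by FullScanMultipleHostsWithLeastRequests; each of the three tied hosts is chosen roughly a third of the time.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

// Return the index of a host with the fewest active requests, breaking ties uniformly
// at random via reservoir sampling: a single pass, no extra allocation.
std::size_t pickLeastLoadedFullScan(const std::vector<std::uint64_t>& active_requests,
                                    std::mt19937_64& rng) {
  std::size_t candidate = 0;
  std::size_t num_tied = 1;  // Hosts seen so far that tie for the current minimum.
  for (std::size_t i = 1; i < active_requests.size(); ++i) {
    if (active_requests[i] < active_requests[candidate]) {
      candidate = i;
      num_tied = 1;  // A new strict minimum resets the tie count.
    } else if (active_requests[i] == active_requests[candidate]) {
      ++num_tied;
      // Replace the candidate with probability 1 / num_tied; after the full pass,
      // every tied host has an equal 1 / N chance of being returned.
      if (std::uniform_int_distribution<std::size_t>(0, num_tied - 1)(rng) == 0) {
        candidate = i;
      }
    }
  }
  return candidate;
}

int main() {
  // Same shape as FullScanMultipleHostsWithLeastRequests: hosts 2-4 tie for least.
  const std::vector<std::uint64_t> active_requests = {3, 3, 1, 1, 1};
  std::mt19937_64 rng(42);
  std::vector<std::size_t> counts(active_requests.size(), 0);
  constexpr int kSelections = 1000000;
  for (int i = 0; i < kSelections; ++i) {
    ++counts[pickLeastLoadedFullScan(active_requests, rng)];
  }
  for (std::size_t i = 0; i < counts.size(); ++i) {
    std::cout << "host " << i << ": " << counts[i] << "\n";  // Hosts 2-4 each land near 333333.
  }
  return 0;
}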
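
The 25% figure in Example 2 above can be checked with a similarly self-contained sketch, again not Envoy code and using the same plain-counter model and standard-library RNG: with two hosts and choice_count = 2, the busier host is returned only when both independent uniform draws land on it, which happens about a quarter of the time.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

// Pick the host with fewer active requests among `choices` independent uniform samples
// (two-choice selection when choices == 2, matching the N_CHOICES default).
std::size_t pickNChoices(const std::vector<std::uint64_t>& active_requests, int choices,
                         std::mt19937_64& rng) {
  std::uniform_int_distribution<std::size_t> pick(0, active_requests.size() - 1);
  std::size_t best = pick(rng);
  for (int c = 1; c < choices; ++c) {
    const std::size_t sampled = pick(rng);
    if (active_requests[sampled] < active_requests[best]) {
      best = sampled;
    }
  }
  return best;
}

int main() {
  // Two hosts; host 1 is busier. It is returned only when both draws select it: ~25%.
  const std::vector<std::uint64_t> active_requests = {1, 10};
  std::mt19937_64 rng(7);
  int busier_picked = 0;
  constexpr int kSelections = 1000000;
  for (int i = 0; i < kSelections; ++i) {
    if (pickNChoices(active_requests, 2, rng) == 1) {
      ++busier_picked;
    }
  }
  std::cout << "busier host selected " << 100.0 * busier_picked / kSelections << "% of the time\n";
  return 0;
}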