Deprecation: Remove allow compact maglev runtime guard. (#30431)
Remove allow compact maglev runtime guard.

Signed-off-by: Kevin Baichoo <kbaichoo@google.com>
KBaichoo authored Oct 24, 2023
1 parent 1f0192d commit 2489e24
Showing 6 changed files with 64 additions and 29 deletions.
5 changes: 3 additions & 2 deletions changelogs/current.yaml
@@ -21,15 +21,16 @@ bug_fixes:
removed_config_or_runtime:
# *Normally occurs at the end of the* :ref:`deprecation period <deprecated>`

- area: http
change: |
removed ``envoy.reloadable_features.correctly_validate_alpn`` and legacy code paths.
- area: maglev
change: |
Removed ``envoy.reloadable_features.allow_compact_maglev`` and legacy code paths.
- area: router
change: |
Removed the deprecated ``envoy.reloadable_features.prohibit_route_refresh_after_response_headers_sent``
runtime flag and legacy code path.
- area: upstream
change: |
Removed the deprecated ``envoy.reloadable_features.validate_detailed_override_host_statuses``
1 change: 0 additions & 1 deletion source/common/runtime/runtime_features.cc
@@ -30,7 +30,6 @@
// ASAP by filing a bug on github. Overriding non-buggy code is strongly discouraged to avoid the
// problem of the bugs being found after the old code path has been removed.
RUNTIME_GUARD(envoy_reloadable_features_allow_absolute_url_with_mixed_scheme);
- RUNTIME_GUARD(envoy_reloadable_features_allow_compact_maglev);
RUNTIME_GUARD(envoy_reloadable_features_append_xfh_idempotent);
RUNTIME_GUARD(envoy_reloadable_features_check_mep_on_first_eject);
RUNTIME_GUARD(envoy_reloadable_features_conn_pool_delete_when_idle);
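
For context, the guard deleted above was registered with the RUNTIME_GUARD macro and consulted through Runtime::runtimeFeatureEnabled(); once the entry is gone, overrides of that key no longer change behavior. The snippet below is an illustrative sketch only (the helper name is made up), mirroring the TestScopedRuntime usage removed from maglev_lb_test.cc further down, to show how the flag used to be forced off before this commit.

#include "test/test_common/test_runtime.h"

namespace Envoy {

// Pre-#30431: this disabled the compact Maglev table via the runtime layer.
// After this commit the key is no longer registered, so the override is a no-op.
void forceOriginalMaglevViaRuntime() {
  TestScopedRuntime scoped_runtime;
  scoped_runtime.mergeValues({{"envoy.reloadable_features.allow_compact_maglev", "false"}});
}

} // namespace Envoy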
18 changes: 18 additions & 0 deletions source/extensions/load_balancing_policies/maglev/BUILD
@@ -23,6 +23,24 @@ envoy_cc_library(
],
)

# This library target is for testing, especially x86 coverage,
# which will use the compact representation unless we force the
# original implementation.
envoy_cc_library(
name = "maglev_lb_force_original_impl_lib",
srcs = ["maglev_lb.cc"],
hdrs = ["maglev_lb.h"],
copts = ["-DMAGLEV_LB_FORCE_ORIGINAL_IMPL"],
deps = [
"//envoy/upstream:load_balancer_interface",
"//source/common/common:bit_array_lib",
"//source/common/runtime:runtime_features_lib",
"//source/common/upstream:thread_aware_lb_lib",
"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto",
"@envoy_api//envoy/extensions/load_balancing_policies/maglev/v3:pkg_cc_proto",
],
)

envoy_cc_extension(
name = "config",
srcs = ["config.cc"],
7 changes: 5 additions & 2 deletions source/extensions/load_balancing_policies/maglev/maglev_lb.cc
@@ -13,6 +13,10 @@ bool shouldUseCompactTable(size_t num_hosts, uint64_t table_size) {
return false;
}

+ #ifdef MAGLEV_LB_FORCE_ORIGINAL_IMPL
+   return false;
+ #endif

if (num_hosts > MaglevTable::MaxNumberOfHostsForCompactMaglev) {
return false;
}
@@ -37,8 +41,7 @@ class MaglevFactory : private Logger::Loggable<Logger::Id::upstream> {
bool use_hostname_for_hashing, MaglevLoadBalancerStats& stats) {

MaglevTableSharedPtr maglev_table;
- if (shouldUseCompactTable(normalized_host_weights.size(), table_size) &&
-     Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_compact_maglev")) {
+ if (shouldUseCompactTable(normalized_host_weights.size(), table_size)) {
maglev_table =
std::make_shared<CompactMaglevTable>(normalized_host_weights, max_normalized_weight,
table_size, use_hostname_for_hashing, stats);
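
Taken together, the two hunks above mean the table choice is now purely structural: a compile-time escape hatch (MAGLEV_LB_FORCE_ORIGINAL_IMPL) plus the size checks in shouldUseCompactTable(), with no runtime flag in between. The following is a condensed sketch of that selection path, not the actual MaglevFactory code; the free-function name, the NormalizedHostWeightVector parameter type, and OriginalMaglevTable as the name of the non-compact implementation are assumptions for illustration.

// Condensed sketch of the post-change selection (assumed names noted above).
MaglevTableSharedPtr createTableSketch(const NormalizedHostWeightVector& normalized_host_weights,
                                       double max_normalized_weight, uint64_t table_size,
                                       bool use_hostname_for_hashing,
                                       MaglevLoadBalancerStats& stats) {
  // The former Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_compact_maglev")
  // check is gone; shouldUseCompactTable() alone decides, and it always returns false when the
  // library is built with -DMAGLEV_LB_FORCE_ORIGINAL_IMPL.
  if (shouldUseCompactTable(normalized_host_weights.size(), table_size)) {
    return std::make_shared<CompactMaglevTable>(normalized_host_weights, max_normalized_weight,
                                                table_size, use_hostname_for_hashing, stats);
  }
  return std::make_shared<OriginalMaglevTable>(normalized_host_weights, max_normalized_weight,
                                               table_size, use_hostname_for_hashing, stats);
}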
21 changes: 21 additions & 0 deletions test/extensions/load_balancing_policies/maglev/BUILD
@@ -30,6 +30,27 @@ envoy_extension_cc_test(
],
)

# Runs the same test suite as :maglev_lb_test with the forced original implementation
# to ensure coverage on x86.
envoy_extension_cc_test(
name = "maglev_lb_force_original_impl_test",
srcs = ["maglev_lb_test.cc"],
extension_names = ["envoy.load_balancing_policies.maglev"],
deps = [
"//source/extensions/load_balancing_policies/maglev:maglev_lb_force_original_impl_lib",
"//test/common/upstream:utility_lib",
"//test/mocks:common_lib",
"//test/mocks/upstream:cluster_info_mocks",
"//test/mocks/upstream:host_mocks",
"//test/mocks/upstream:host_set_mocks",
"//test/mocks/upstream:load_balancer_context_mock",
"//test/mocks/upstream:priority_set_mocks",
"//test/test_common:simulated_time_system_lib",
"//test/test_common:test_runtime_lib",
"@envoy_api//envoy/config/cluster/v3:pkg_cc_proto",
],
)

envoy_extension_cc_test(
name = "config_test",
srcs = ["config_test.cc"],
41 changes: 17 additions & 24 deletions test/extensions/load_balancing_policies/maglev/maglev_lb_test.cc
@@ -45,14 +45,10 @@ class TestLoadBalancerContext : public LoadBalancerContextBase {

// Note: ThreadAwareLoadBalancer base is heavily tested by RingHashLoadBalancerTest. Only basic
// functionality is covered here.
- class MaglevLoadBalancerTest : public Event::TestUsingSimulatedTime,
-                                public testing::TestWithParam<bool> {
+ class MaglevLoadBalancerTest : public Event::TestUsingSimulatedTime, public testing::Test {
public:
MaglevLoadBalancerTest()
-     : stat_names_(stats_store_.symbolTable()), stats_(stat_names_, *stats_store_.rootScope()) {
-   scoped_runtime_.mergeValues(
-       {{"envoy.reloadable_features.allow_compact_maglev", GetParam() ? "true" : "false"}});
- }
+     : stat_names_(stats_store_.symbolTable()), stats_(stat_names_, *stats_store_.rootScope()) {}

void createLb() {
lb_ = std::make_unique<MaglevLoadBalancer>(
@@ -91,13 +87,10 @@ class MaglevLoadBalancerTest : public Event::TestUsingSimulatedTime,
NiceMock<Runtime::MockLoader> runtime_;
NiceMock<Random::MockRandomGenerator> random_;
std::unique_ptr<MaglevLoadBalancer> lb_;
- TestScopedRuntime scoped_runtime_;
};

- INSTANTIATE_TEST_SUITE_P(MaglevTests, MaglevLoadBalancerTest, ::testing::Bool());

// Works correctly without any hosts.
- TEST_P(MaglevLoadBalancerTest, NoHost) {
+ TEST_F(MaglevLoadBalancerTest, NoHost) {
init(7);
EXPECT_EQ(nullptr, lb_->factory()->create(lb_params_)->chooseHost(nullptr));
};
@@ -106,7 +99,7 @@ TEST_P(MaglevLoadBalancerTest, NoHost) {
// cluster, the operation does not immediately reach the worker thread. There may be cases where the
// thread aware load balancer is destructed, but the load balancer factory is still used in the
// worker thread.
- TEST_P(MaglevLoadBalancerTest, LbDestructedBeforeFactory) {
+ TEST_F(MaglevLoadBalancerTest, LbDestructedBeforeFactory) {
init(7);

auto factory = lb_->factory();
@@ -116,13 +109,13 @@ TEST_P(MaglevLoadBalancerTest, LbDestructedBeforeFactory) {
}

// Throws an exception if table size is not a prime number.
- TEST_P(MaglevLoadBalancerTest, NoPrimeNumber) {
+ TEST_F(MaglevLoadBalancerTest, NoPrimeNumber) {
EXPECT_THROW_WITH_MESSAGE(init(8), EnvoyException,
"The table size of maglev must be prime number");
};

// Check it has default table size if config is null or table size has invalid value.
- TEST_P(MaglevLoadBalancerTest, DefaultMaglevTableSize) {
+ TEST_F(MaglevLoadBalancerTest, DefaultMaglevTableSize) {
const uint64_t defaultValue = MaglevTable::DefaultTableSize;

config_ = envoy::config::cluster::v3::Cluster::MaglevLbConfig();
@@ -135,7 +128,7 @@ TEST_P(MaglevLoadBalancerTest, DefaultMaglevTableSize) {
};

// Basic sanity tests.
- TEST_P(MaglevLoadBalancerTest, Basic) {
+ TEST_F(MaglevLoadBalancerTest, Basic) {
host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:91", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:92", simTime()),
@@ -168,7 +161,7 @@ TEST_P(MaglevLoadBalancerTest, Basic) {
}

// Basic with hostname.
- TEST_P(MaglevLoadBalancerTest, BasicWithHostName) {
+ TEST_F(MaglevLoadBalancerTest, BasicWithHostName) {
host_set_.hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90", simTime()),
makeTestHost(info_, "91", "tcp://127.0.0.1:91", simTime()),
makeTestHost(info_, "92", "tcp://127.0.0.1:92", simTime()),
@@ -203,7 +196,7 @@ TEST_P(MaglevLoadBalancerTest, BasicWithHostName) {
}

// Basic with metadata hash_key.
- TEST_P(MaglevLoadBalancerTest, BasicWithMetadataHashKey) {
+ TEST_F(MaglevLoadBalancerTest, BasicWithMetadataHashKey) {
host_set_.hosts_ = {makeTestHostWithHashKey(info_, "90", "tcp://127.0.0.1:90", simTime()),
makeTestHostWithHashKey(info_, "91", "tcp://127.0.0.1:91", simTime()),
makeTestHostWithHashKey(info_, "92", "tcp://127.0.0.1:92", simTime()),
@@ -238,7 +231,7 @@ TEST_P(MaglevLoadBalancerTest, BasicWithMetadataHashKey) {
}

// Same ring as the Basic test, but exercise retry host predicate behavior.
- TEST_P(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) {
+ TEST_F(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) {
host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:91", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:92", simTime()),
@@ -286,7 +279,7 @@ TEST_P(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) {
}

// Basic stability test.
- TEST_P(MaglevLoadBalancerTest, BasicStability) {
+ TEST_F(MaglevLoadBalancerTest, BasicStability) {
host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:91", simTime()),
makeTestHost(info_, "tcp://127.0.0.1:92", simTime()),
@@ -331,7 +324,7 @@ TEST_P(MaglevLoadBalancerTest, BasicStability) {
}

// Weighted sanity test.
- TEST_P(MaglevLoadBalancerTest, Weighted) {
+ TEST_F(MaglevLoadBalancerTest, Weighted) {
host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1),
makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2)};
host_set_.healthy_hosts_ = host_set_.hosts_;
@@ -369,7 +362,7 @@ TEST_P(MaglevLoadBalancerTest, Weighted) {

// Locality weighted sanity test when localities have the same weights. Host weights for hosts in
// different localities shouldn't matter.
- TEST_P(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) {
+ TEST_F(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) {
envoy::config::core::v3::Locality zone_a;
zone_a.set_zone("A");
envoy::config::core::v3::Locality zone_b;
@@ -417,7 +410,7 @@ TEST_P(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) {

// Locality weighted sanity test when localities have different weights. Host weights for hosts in
// different localities shouldn't matter.
- TEST_P(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) {
+ TEST_F(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) {
envoy::config::core::v3::Locality zone_a;
zone_a.set_zone("A");
envoy::config::core::v3::Locality zone_b;
@@ -467,7 +460,7 @@ TEST_P(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) {
}

// Locality weighted with all localities zero weighted.
- TEST_P(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) {
+ TEST_F(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) {
host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1)};
host_set_.healthy_hosts_ = host_set_.hosts_;
host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}});
@@ -483,7 +476,7 @@ TEST_P(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) {

// Validate that when we are in global panic and have localities, we get sane
// results (fall back to non-healthy hosts).
- TEST_P(MaglevLoadBalancerTest, LocalityWeightedGlobalPanic) {
+ TEST_F(MaglevLoadBalancerTest, LocalityWeightedGlobalPanic) {
envoy::config::core::v3::Locality zone_a;
zone_a.set_zone("A");
envoy::config::core::v3::Locality zone_b;
@@ -531,7 +524,7 @@ TEST_P(MaglevLoadBalancerTest, LocalityWeightedGlobalPanic) {

// Given extremely lopsided locality weights, and a table that isn't large enough to fit all hosts,
// expect that the least-weighted hosts appear once, and the most-weighted host fills the remainder.
- TEST_P(MaglevLoadBalancerTest, LocalityWeightedLopsided) {
+ TEST_F(MaglevLoadBalancerTest, LocalityWeightedLopsided) {
envoy::config::core::v3::Locality zone_a;
zone_a.set_zone("A");
envoy::config::core::v3::Locality zone_b;
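
The edits above are mechanical: with the runtime flag gone there is nothing to parameterize on, so TEST_P plus the INSTANTIATE_TEST_SUITE_P over ::testing::Bool() collapses to plain TEST_F, and the compact-versus-original split is exercised instead by recompiling the same suite with -DMAGLEV_LB_FORCE_ORIGINAL_IMPL (the new maglev_lb_force_original_impl_test target). A toy, self-contained illustration of that shape, not Envoy code:

#include "gtest/gtest.h"

// A plain fixture; previously this kind of suite carried a bool parameter just
// to flip the now-removed runtime flag in its constructor.
class ExampleMaglevStyleTest : public ::testing::Test {
protected:
  uint64_t table_size_ = 7; // The small prime used by init(7) in the tests above.
};

TEST_F(ExampleMaglevStyleTest, RunsOnceWithoutRuntimeParam) { EXPECT_EQ(table_size_, uint64_t{7}); }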
