From 4b989c95493122ca8949840db8c3413491f0a61f Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Tue, 31 Oct 2023 09:20:29 -0400 Subject: [PATCH] HTTP2: Enable deferred processing by default (#30618) Signed-off-by: Kevin Baichoo --- changelogs/current.yaml | 5 +++++ source/common/runtime/runtime_features.cc | 4 +--- test/common/http/codec_impl_fuzz_test.cc | 20 +++++++++++-------- .../shadow_policy_integration_test.cc | 13 +++++++++++- test/mocks/event/mocks.cc | 11 ++++++++++ test/mocks/event/mocks.h | 2 ++ 6 files changed, 43 insertions(+), 12 deletions(-) diff --git a/changelogs/current.yaml b/changelogs/current.yaml index f4d9093a0a68..1bddbbff6676 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -23,6 +23,11 @@ minor_behavior_changes: Added new configuration field :ref:`rate_limited_as_resource_exhausted ` to allow for setting if rate limit grpc response should be RESOURCE_EXHAUSTED instead of the default UNAVAILABLE. +- area: http2 + change: | + Flip the runtime guard ``envoy.reloadable_features.defer_processing_backedup_streams`` to be on by default. + This feature improves flow control within the proxy by deferring work on the receiving end if the other + end is backed up. 
bug_fixes: # *Changes expected to improve the state of the world and are unlikely to have negative effects* diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index fb5b91a9a949..cb5212ddf07e 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -36,6 +36,7 @@ RUNTIME_GUARD(envoy_reloadable_features_conn_pool_delete_when_idle); RUNTIME_GUARD(envoy_reloadable_features_convert_legacy_lb_config); RUNTIME_GUARD(envoy_reloadable_features_copy_response_code_to_downstream_stream_info); RUNTIME_GUARD(envoy_reloadable_features_count_unused_mapped_pages_as_free); +RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams); RUNTIME_GUARD(envoy_reloadable_features_detect_and_raise_rst_tcp_connection); RUNTIME_GUARD(envoy_reloadable_features_dfp_mixed_scheme); RUNTIME_GUARD(envoy_reloadable_features_enable_aws_credentials_file); @@ -98,9 +99,6 @@ FALSE_RUNTIME_GUARD(envoy_reloadable_features_test_feature_false); FALSE_RUNTIME_GUARD(envoy_reloadable_features_streaming_shadow); // TODO(adisuissa) reset to true to enable unified mux by default FALSE_RUNTIME_GUARD(envoy_reloadable_features_unified_mux); -// TODO(kbaichoo): Make this enabled by default when fairness and chunking -// are implemented, and we've had more cpu time. -FALSE_RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams); // TODO(birenroy) flip after a burn-in period FALSE_RUNTIME_GUARD(envoy_reloadable_features_http2_use_oghttp2); // Used to track if runtime is initialized. 
diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 46db8ea12b5c..6101a2ed88c9 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -377,20 +377,24 @@ class HttpStream : public LinkedObject { dispatcher = &context_.client_connection_.dispatcher_; } - // With this feature enabled for http2 we end up creating a schedulable - // callback the first time we re-enable reading as it's used to process - // the backed up data. + // With this feature enabled for http2 the codec may end up creating a + // schedulable callback the first time it re-enables reading as it's used + // to process the backed up data if there's any to process. if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { - const bool expecting_schedulable_callback_creation = + const bool might_schedulable_callback_creation = http_protocol_ == Protocol::Http2 && state.read_disable_count_ == 0 && !disable && !state.created_schedulable_callback_; - if (expecting_schedulable_callback_creation) { + if (might_schedulable_callback_creation) { ASSERT(dispatcher != nullptr); state.created_schedulable_callback_ = true; - // The unique pointer of this object will be returned in createSchedulableCallback_ of - // dispatcher, so there is no risk of object leak. - new Event::MockSchedulableCallback(dispatcher); + ON_CALL(*dispatcher, createSchedulableCallback_(_)) + .WillByDefault(testing::Invoke([dispatcher](std::function<void()> cb) { + // The unique pointer of this object will be returned in + // createSchedulableCallback_ of dispatcher, so there is no risk of this object + // leaking. 
+ return new Event::MockSchedulableCallback(dispatcher, cb); + })); } } diff --git a/test/integration/shadow_policy_integration_test.cc b/test/integration/shadow_policy_integration_test.cc index 4f4c1b8855d7..fb196931d04d 100644 --- a/test/integration/shadow_policy_integration_test.cc +++ b/test/integration/shadow_policy_integration_test.cc @@ -510,6 +510,11 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) { GTEST_SKIP() << "Not applicable for non-streaming shadows."; } autonomous_upstream_ = true; + if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { + // With deferred processing, a local reply is triggered so the upstream + // stream will be incomplete. + autonomous_allow_incomplete_streams_ = true; + } cluster_with_custom_filter_ = 0; filter_name_ = "encoder-decoder-buffer-filter"; initialConfigSetup("cluster_1", ""); @@ -537,7 +542,13 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) { EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_total")->value(), 1); EXPECT_EQ(test_server_->counter("cluster.cluster_1.upstream_cx_total")->value(), 1); - test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1); + if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { + // With deferred processing, the encoder-decoder-buffer-filter will + // buffer too much data triggering a local reply. 
+ test_server_->waitForCounterEq("http.config_test.downstream_rq_4xx", 1); + } else { + test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1); + } } TEST_P(ShadowPolicyIntegrationTest, ShadowRequestOverBufferLimit) { diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 16468bf697cb..e6a0b436f671 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -76,12 +76,23 @@ MockSchedulableCallback::~MockSchedulableCallback() { } } +MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher, + std::function<void()> callback, + testing::MockFunction<void()>* destroy_cb) + : dispatcher_(dispatcher), callback_(callback), destroy_cb_(destroy_cb) { + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); + ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_)); +} + MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher, testing::MockFunction<void()>* destroy_cb) : dispatcher_(dispatcher), destroy_cb_(destroy_cb) { EXPECT_CALL(*dispatcher, createSchedulableCallback_(_)) .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this))) .RetiresOnSaturation(); + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true)); ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index f3279645a31e..4a0b2f6f9e27 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -224,6 +224,8 @@ class MockSchedulableCallback : public SchedulableCallback { public: MockSchedulableCallback(MockDispatcher* dispatcher, testing::MockFunction<void()>* destroy_cb = nullptr); + MockSchedulableCallback(MockDispatcher* dispatcher, std::function<void()> 
callback, + testing::MockFunction<void()>* destroy_cb = nullptr); ~MockSchedulableCallback() override; void invokeCallback() {