HTTP2: Enable deferred processing by default (#30618)
Signed-off-by: Kevin Baichoo <kbaichoo@google.com>
KBaichoo authored Oct 31, 2023
1 parent 9fb9844 commit 4b989c9
Showing 6 changed files with 43 additions and 12 deletions.
5 changes: 5 additions & 0 deletions changelogs/current.yaml
@@ -23,6 +23,11 @@ minor_behavior_changes:
Added new configuration field :ref:`rate_limited_as_resource_exhausted
<envoy_v3_api_field_extensions.filters.http.local_ratelimit.v3.LocalRateLimit.rate_limited_as_resource_exhausted>`
to allow for setting if rate limit grpc response should be RESOURCE_EXHAUSTED instead of the default UNAVAILABLE.
- area: http2
change: |
Flip the runtime guard ``envoy.reloadable_features.defer_processing_backedup_streams`` to be on by default.
This feature improves flow control within the proxy by deferring work on the receiving end if the other
end is backed up.
bug_fixes:
# *Changes expected to improve the state of the world and are unlikely to have negative effects*
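For context on how the flipped guard is consumed: Envoy code paths gate on the feature key through ``Runtime::runtimeFeatureEnabled``. A minimal sketch follows; the surrounding function is hypothetical, and only the runtime API and the feature key are real.

```cpp
#include "source/common/runtime/runtime_features.h"

namespace Envoy {

// Hypothetical call site, used only for illustration.
void maybeDeferStreamProcessing() {
  if (Runtime::runtimeFeatureEnabled(
          "envoy.reloadable_features.defer_processing_backedup_streams")) {
    // New default: defer work on the receiving side while the peer is backed up.
  } else {
    // Legacy inline path, reachable only if the guard is overridden to false.
  }
}

} // namespace Envoy
```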
4 changes: 1 addition & 3 deletions source/common/runtime/runtime_features.cc
@@ -36,6 +36,7 @@ RUNTIME_GUARD(envoy_reloadable_features_conn_pool_delete_when_idle);
RUNTIME_GUARD(envoy_reloadable_features_convert_legacy_lb_config);
RUNTIME_GUARD(envoy_reloadable_features_copy_response_code_to_downstream_stream_info);
RUNTIME_GUARD(envoy_reloadable_features_count_unused_mapped_pages_as_free);
RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams);
RUNTIME_GUARD(envoy_reloadable_features_detect_and_raise_rst_tcp_connection);
RUNTIME_GUARD(envoy_reloadable_features_dfp_mixed_scheme);
RUNTIME_GUARD(envoy_reloadable_features_enable_aws_credentials_file);
@@ -98,9 +99,6 @@ FALSE_RUNTIME_GUARD(envoy_reloadable_features_test_feature_false);
FALSE_RUNTIME_GUARD(envoy_reloadable_features_streaming_shadow);
// TODO(adisuissa) reset to true to enable unified mux by default
FALSE_RUNTIME_GUARD(envoy_reloadable_features_unified_mux);
// TODO(kbaichoo): Make this enabled by default when fairness and chunking
// are implemented, and we've had more cpu time.
FALSE_RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams);
// TODO(birenroy) flip after a burn-in period
FALSE_RUNTIME_GUARD(envoy_reloadable_features_http2_use_oghttp2);
// Used to track if runtime is initialized.
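Because the guard moves from ``FALSE_RUNTIME_GUARD`` to ``RUNTIME_GUARD``, it now defaults to true but remains overridable. As a hedged sketch (the test name is invented; ``TestScopedRuntime`` is Envoy's existing test helper), a test that still needs the legacy behavior could pin the guard back to false:

```cpp
#include "gtest/gtest.h"
#include "source/common/runtime/runtime_features.h"
#include "test/test_common/test_runtime.h"

namespace Envoy {

TEST(DeferProcessingGuardSketch, ForceLegacyBehavior) {
  // Override the now-default-true guard back to false for this test's scope.
  TestScopedRuntime scoped_runtime;
  scoped_runtime.mergeValues(
      {{"envoy.reloadable_features.defer_processing_backedup_streams", "false"}});
  EXPECT_FALSE(Runtime::runtimeFeatureEnabled(
      "envoy.reloadable_features.defer_processing_backedup_streams"));
}

} // namespace Envoy
```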
20 changes: 12 additions & 8 deletions test/common/http/codec_impl_fuzz_test.cc
@@ -377,20 +377,24 @@ class HttpStream : public LinkedObject<HttpStream> {
dispatcher = &context_.client_connection_.dispatcher_;
}

// With this feature enabled for http2 we end up creating a schedulable
// callback the first time we re-enable reading as it's used to process
// the backed up data.
// With this feature enabled for http2 the codec may end up creating a
// schedulable callback the first time it re-enables reading as it's used
// to process the backed up data if there's any to process.
if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) {
const bool expecting_schedulable_callback_creation =
const bool might_schedulable_callback_creation =
http_protocol_ == Protocol::Http2 && state.read_disable_count_ == 0 && !disable &&
!state.created_schedulable_callback_;

if (expecting_schedulable_callback_creation) {
if (might_schedulable_callback_creation) {
ASSERT(dispatcher != nullptr);
state.created_schedulable_callback_ = true;
// The unique pointer of this object will be returned in createSchedulableCallback_ of
// dispatcher, so there is no risk of object leak.
new Event::MockSchedulableCallback(dispatcher);
ON_CALL(*dispatcher, createSchedulableCallback_(_))
.WillByDefault(testing::Invoke([dispatcher](std::function<void()> cb) {
// The unique pointer of this object will be returned in
// createSchedulableCallback_ of dispatcher, so there is no risk of this object
// leaking.
return new Event::MockSchedulableCallback(dispatcher, cb);
}));
}
}

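The fuzzer above only needs the lazily built mock so the codec's call into the dispatcher is satisfied. A unit test that wants to actually run the deferred drain could keep the pointer and fire it; a rough sketch follows (test scaffolding assumed, with only ``MockDispatcher``, ``MockSchedulableCallback``, ``createSchedulableCallback_``, ``enabled()``, and ``invokeCallback()`` being real APIs):

```cpp
// Sketch of a test body, not code from this commit.
Event::MockDispatcher dispatcher;
Event::MockSchedulableCallback* deferred_drain = nullptr;

ON_CALL(dispatcher, createSchedulableCallback_(testing::_))
    .WillByDefault(testing::Invoke([&](std::function<void()> cb) {
      // Ownership transfers back through createSchedulableCallback_, as in the
      // fuzz test above, so the raw pointer does not leak.
      deferred_drain = new Event::MockSchedulableCallback(&dispatcher, cb);
      return deferred_drain;
    }));

// ... drive the HTTP/2 codec until it re-enables reading on a backed-up
// stream, which creates and schedules the callback ...

if (deferred_drain != nullptr && deferred_drain->enabled()) {
  deferred_drain->invokeCallback(); // executes the captured deferred work now
}
```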
13 changes: 12 additions & 1 deletion test/integration/shadow_policy_integration_test.cc
@@ -510,6 +510,11 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) {
GTEST_SKIP() << "Not applicable for non-streaming shadows.";
}
autonomous_upstream_ = true;
if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) {
// With deferred processing, a local reply is triggered so the upstream
// stream will be incomplete.
autonomous_allow_incomplete_streams_ = true;
}
cluster_with_custom_filter_ = 0;
filter_name_ = "encoder-decoder-buffer-filter";
initialConfigSetup("cluster_1", "");
@@ -537,7 +542,13 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) {

EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_total")->value(), 1);
EXPECT_EQ(test_server_->counter("cluster.cluster_1.upstream_cx_total")->value(), 1);
test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1);
if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) {
// With deferred processing, the encoder-decoder-buffer-filter will
// buffer too much data triggering a local reply.
test_server_->waitForCounterEq("http.config_test.downstream_rq_4xx", 1);
} else {
test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1);
}
}

TEST_P(ShadowPolicyIntegrationTest, ShadowRequestOverBufferLimit) {
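On the first hunk above: ``autonomous_allow_incomplete_streams_`` is needed because the autonomous upstream normally asserts at teardown that every stream it received was finished, and the buffer filter's local reply cuts the shadowed request short. A paraphrased sketch of that check (the real code lives in test/integration/autonomous_upstream.cc and may differ in detail):

```cpp
// Paraphrase for illustration; not the exact Envoy implementation.
AutonomousStream::~AutonomousStream() {
  if (!allow_incomplete_streams_) {
    // Without the relaxation above, a stream that never saw end_stream
    // (e.g. one aborted by a local reply) would fail this assertion.
    RELEASE_ASSERT(complete(), "Autonomous upstream received an unfinished stream");
  }
}
```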
11 changes: 11 additions & 0 deletions test/mocks/event/mocks.cc
@@ -76,12 +76,23 @@ MockSchedulableCallback::~MockSchedulableCallback() {
}
}

MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher,
std::function<void()> callback,
testing::MockFunction<void()>* destroy_cb)
: dispatcher_(dispatcher), callback_(callback), destroy_cb_(destroy_cb) {
ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true));
ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true));
ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false));
ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));
}

MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher,
testing::MockFunction<void()>* destroy_cb)
: dispatcher_(dispatcher), destroy_cb_(destroy_cb) {
EXPECT_CALL(*dispatcher, createSchedulableCallback_(_))
.WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))
.RetiresOnSaturation();

ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true));
ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true));
ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false));
2 changes: 2 additions & 0 deletions test/mocks/event/mocks.h
@@ -224,6 +224,8 @@ class MockSchedulableCallback : public SchedulableCallback {
public:
MockSchedulableCallback(MockDispatcher* dispatcher,
testing::MockFunction<void()>* destroy_cb = nullptr);
MockSchedulableCallback(MockDispatcher* dispatcher, std::function<void()> callback,
testing::MockFunction<void()>* destroy_cb = nullptr);
~MockSchedulableCallback() override;

void invokeCallback() {
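Usage contrast for the two constructors declared above (test-body sketch; everything except the mock types and ``createSchedulableCallback_`` is illustrative): the original overload installs a strict one-shot expectation on the dispatcher, while the new overload takes the callback directly so the mock can be built lazily from a default action.

```cpp
Event::MockDispatcher dispatcher;

// Option A, existing overload: the constructor itself EXPECT_CALLs
// createSchedulableCallback_ once, so the test fails if the code under test
// never creates a schedulable callback.
new Event::MockSchedulableCallback(&dispatcher);

// Option B, new overload: no expectation is installed; the mock is only built
// if and when the code under test asks the dispatcher for a callback.
ON_CALL(dispatcher, createSchedulableCallback_(testing::_))
    .WillByDefault(testing::Invoke([&dispatcher](std::function<void()> cb) {
      return new Event::MockSchedulableCallback(&dispatcher, cb);
    }));
```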
