@@ -68,6 +68,7 @@ use tower::{
     layer::util::{Identity, Stack},
     layer::Layer,
     limit::concurrency::ConcurrencyLimitLayer,
+    load_shed::LoadShedLayer,
     util::BoxCloneService,
     Service, ServiceBuilder, ServiceExt,
 };
@@ -89,6 +90,7 @@ const DEFAULT_HTTP2_KEEPALIVE_TIMEOUT_SECS: u64 = 20;
 pub struct Server<L = Identity> {
     trace_interceptor: Option<TraceInterceptor>,
     concurrency_limit: Option<usize>,
+    load_shed: bool,
     timeout: Option<Duration>,
     #[cfg(feature = "_tls-any")]
     tls: Option<TlsAcceptor>,
@@ -113,6 +115,7 @@ impl Default for Server<Identity> {
         Self {
             trace_interceptor: None,
             concurrency_limit: None,
+            load_shed: false,
             timeout: None,
             #[cfg(feature = "_tls-any")]
             tls: None,
@@ -181,6 +184,27 @@ impl<L> Server<L> {
         }
     }
 
+    /// Enable or disable load shedding. The default is disabled.
+    ///
+    /// When load shedding is enabled, if the service responds with not ready
+    /// the request will immediately be rejected with a
+    /// [`resource_exhausted`](https://docs.rs/tonic/latest/tonic/struct.Status.html#method.resource_exhausted) error.
+    /// The default is to buffer requests. This is especially useful in combination with
+    /// setting a concurrency limit per connection.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use tonic::transport::Server;
+    /// # use tower_service::Service;
+    /// # let builder = Server::builder();
+    /// builder.load_shed(true);
+    /// ```
+    #[must_use]
+    pub fn load_shed(self, load_shed: bool) -> Self {
+        Server { load_shed, ..self }
+    }
+
     /// Set a timeout on for all request handlers.
     ///
     /// # Example
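
A minimal usage sketch based on the doc comment above: pairing `load_shed(true)` with tonic's existing `concurrency_limit_per_connection` so excess requests fail fast with `resource_exhausted` instead of queueing. The limit of 32 is an arbitrary illustrative value, not part of this change.

```rust
// Sketch only: combine load shedding with a per-connection concurrency limit.
// Requests beyond the limit are rejected immediately rather than buffered.
let builder = tonic::transport::Server::builder()
    .concurrency_limit_per_connection(32) // illustrative limit
    .load_shed(true);
```
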
@@ -516,6 +540,7 @@ impl<L> Server<L> {
             service_builder: self.service_builder.layer(new_layer),
             trace_interceptor: self.trace_interceptor,
             concurrency_limit: self.concurrency_limit,
+            load_shed: self.load_shed,
             timeout: self.timeout,
             #[cfg(feature = "_tls-any")]
             tls: self.tls,
@@ -645,6 +670,7 @@ impl<L> Server<L> {
     {
         let trace_interceptor = self.trace_interceptor.clone();
         let concurrency_limit = self.concurrency_limit;
+        let load_shed = self.load_shed;
         let init_connection_window_size = self.init_connection_window_size;
         let init_stream_window_size = self.init_stream_window_size;
         let max_concurrent_streams = self.max_concurrent_streams;
@@ -671,6 +697,7 @@ impl<L> Server<L> {
         let mut svc = MakeSvc {
             inner: svc,
             concurrency_limit,
+            load_shed,
             timeout,
             trace_interceptor,
             _io: PhantomData,
@@ -1056,6 +1083,7 @@ impl<S> fmt::Debug for Svc<S> {
 #[derive(Clone)]
 struct MakeSvc<S, IO> {
     concurrency_limit: Option<usize>,
+    load_shed: bool,
     timeout: Option<Duration>,
     inner: S,
     trace_interceptor: Option<TraceInterceptor>,
@@ -1089,6 +1117,7 @@ where
 
         let svc = ServiceBuilder::new()
             .layer(RecoverErrorLayer::new())
+            .option_layer(self.load_shed.then_some(LoadShedLayer::new()))
             .option_layer(concurrency_limit.map(ConcurrencyLimitLayer::new))
             .layer_fn(|s| GrpcTimeout::new(s, timeout))
             .service(svc);
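
For reference, a self-contained sketch (not tonic internals) of the `option_layer` + `then_some` pattern used in the hunk above, assuming the tower crate with its `load-shed`, `limit`, and `util` features enabled; the flag values and the `u32` echo service are only illustrative.

```rust
use tower::{limit::ConcurrencyLimitLayer, load_shed::LoadShedLayer, BoxError, ServiceBuilder};

let load_shed = true;
let concurrency_limit: Option<usize> = Some(32); // illustrative values

let svc = ServiceBuilder::new()
    // Layer is inserted only when the flag is set; otherwise this slot is a no-op.
    .option_layer(load_shed.then_some(LoadShedLayer::new()))
    // Layer is inserted only when a limit was configured.
    .option_layer(concurrency_limit.map(ConcurrencyLimitLayer::new))
    .service(tower::service_fn(|req: u32| async move { Ok::<u32, BoxError>(req) }));
```

With this ordering, load shedding wraps the concurrency limiter: when no permit is available the inner service reports not ready, and the shed layer rejects the request instead of waiting, which is the combination the new doc comment recommends.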