@@ -66,6 +66,7 @@ use tower::{
     layer::util::{Identity, Stack},
     layer::Layer,
     limit::concurrency::ConcurrencyLimitLayer,
+    load_shed::LoadShedLayer,
     util::BoxCloneService,
     Service, ServiceBuilder, ServiceExt,
 };
@@ -87,6 +88,7 @@ const DEFAULT_HTTP2_KEEPALIVE_TIMEOUT: Duration = Duration::from_secs(20);
 pub struct Server<L = Identity> {
     trace_interceptor: Option<TraceInterceptor>,
     concurrency_limit: Option<usize>,
+    load_shed: bool,
     timeout: Option<Duration>,
     #[cfg(feature = "_tls-any")]
     tls: Option<TlsAcceptor>,
@@ -111,6 +113,7 @@ impl Default for Server<Identity> {
         Self {
             trace_interceptor: None,
             concurrency_limit: None,
+            load_shed: false,
             timeout: None,
             #[cfg(feature = "_tls-any")]
             tls: None,
@@ -179,6 +182,27 @@ impl<L> Server<L> {
         }
     }
 
+    /// Enable or disable load shedding. The default is disabled.
+    ///
+    /// When load shedding is enabled, if the service responds with not ready
+    /// the request will immediately be rejected with a
+    /// [`resource_exhausted`](https://docs.rs/tonic/latest/tonic/struct.Status.html#method.resource_exhausted) error.
+    /// The default is to buffer requests. This is especially useful in combination with
+    /// setting a concurrency limit per connection.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use tonic::transport::Server;
+    /// # use tower_service::Service;
+    /// # let builder = Server::builder();
+    /// builder.load_shed(true);
+    /// ```
+    #[must_use]
+    pub fn load_shed(self, load_shed: bool) -> Self {
+        Server { load_shed, ..self }
+    }
+
     /// Set a timeout on for all request handlers.
     ///
     /// # Example
@@ -514,6 +538,7 @@ impl<L> Server<L> {
             service_builder: self.service_builder.layer(new_layer),
             trace_interceptor: self.trace_interceptor,
             concurrency_limit: self.concurrency_limit,
+            load_shed: self.load_shed,
             timeout: self.timeout,
             #[cfg(feature = "_tls-any")]
             tls: self.tls,
@@ -643,6 +668,7 @@ impl<L> Server<L> {
     {
         let trace_interceptor = self.trace_interceptor.clone();
         let concurrency_limit = self.concurrency_limit;
+        let load_shed = self.load_shed;
         let init_connection_window_size = self.init_connection_window_size;
         let init_stream_window_size = self.init_stream_window_size;
         let max_concurrent_streams = self.max_concurrent_streams;
@@ -667,6 +693,7 @@ impl<L> Server<L> {
         let mut svc = MakeSvc {
             inner: svc,
             concurrency_limit,
+            load_shed,
             timeout,
             trace_interceptor,
             _io: PhantomData,
@@ -1051,6 +1078,7 @@ impl<S> fmt::Debug for Svc<S> {
 #[derive(Clone)]
 struct MakeSvc<S, IO> {
     concurrency_limit: Option<usize>,
+    load_shed: bool,
     timeout: Option<Duration>,
     inner: S,
     trace_interceptor: Option<TraceInterceptor>,
@@ -1084,6 +1112,7 @@ where
 
         let svc = ServiceBuilder::new()
             .layer(RecoverErrorLayer::new())
+            .option_layer(self.load_shed.then_some(LoadShedLayer::new()))
             .option_layer(concurrency_limit.map(ConcurrencyLimitLayer::new))
             .layer_fn(|s| GrpcTimeout::new(s, timeout))
             .service(svc);
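For context, a minimal sketch of how the new flag is intended to be used together with the existing `concurrency_limit_per_connection` builder method, as the doc comment above suggests. The `my_proto` module, `GreeterServer`/`MyGreeter` service, and listen address are placeholders and not part of this change.

```rust
use std::time::Duration;
use tonic::transport::Server;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder: any tonic-generated service type would work here.
    let svc = my_proto::greeter_server::GreeterServer::new(MyGreeter::default());

    Server::builder()
        // Allow at most 32 in-flight requests per connection...
        .concurrency_limit_per_connection(32)
        // ...and, with the flag added in this diff, reject excess requests
        // immediately with `resource_exhausted` instead of buffering them.
        .load_shed(true)
        .timeout(Duration::from_secs(30))
        .add_service(svc)
        .serve("[::1]:50051".parse()?)
        .await?;

    Ok(())
}
```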