Skip to content

Commit 2490155

Browse files
Eric Dumazet authored and davem330 (David S. Miller) committed
tcp: remove in_flight parameter from cong_avoid() methods
Commit e114a71 ("tcp: fix cwnd limited checking to improve congestion control") obsoleted in_flight parameter from tcp_is_cwnd_limited() and its callers. This patch does the removal as promised. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent e114a71 commit 2490155

15 files changed

+36
-48
lines changed

include/net/tcp.h

+3-5
Original file line number | Diff line number | Diff line change
@@ -796,7 +796,7 @@ struct tcp_congestion_ops {
796796
/* return slow start threshold (required) */
797797
u32 (*ssthresh)(struct sock *sk);
798798
/* do new cwnd calculation (required) */
799-
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
799+
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
800800
/* call before changing ca_state (optional) */
801801
void (*set_state)(struct sock *sk, u8 new_state);
802802
/* call when cwnd event occurs (optional) */
@@ -828,7 +828,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
828828

829829
extern struct tcp_congestion_ops tcp_init_congestion_ops;
830830
u32 tcp_reno_ssthresh(struct sock *sk);
831-
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
831+
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
832832
extern struct tcp_congestion_ops tcp_reno;
833833

834834
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -986,10 +986,8 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
986986
* risks 100% overshoot. The advantage is that we discourage application to
987987
* either send more filler packets or data to artificially blow up the cwnd
988988
* usage, and allow application-limited process to probe bw more aggressively.
989-
*
990-
* TODO: remove in_flight once we can fix all callers, and their callers...
991989
*/
992-
static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
990+
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
993991
{
994992
const struct tcp_sock *tp = tcp_sk(sk);
995993

net/ipv4/tcp_bic.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
140140
ca->cnt = 1;
141141
}
142142

143-
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
144-
u32 in_flight)
143+
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
145144
{
146145
struct tcp_sock *tp = tcp_sk(sk);
147146
struct bictcp *ca = inet_csk_ca(sk);
148147

149-
if (!tcp_is_cwnd_limited(sk, in_flight))
148+
if (!tcp_is_cwnd_limited(sk))
150149
return;
151150

152151
if (tp->snd_cwnd <= tp->snd_ssthresh)

net/ipv4/tcp_cong.c

+2-2
Original file line number | Diff line number | Diff line change
@@ -317,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
317317
/* This is Jacobson's slow start and congestion avoidance.
318318
* SIGCOMM '88, p. 328.
319319
*/
320-
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
320+
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
321321
{
322322
struct tcp_sock *tp = tcp_sk(sk);
323323

324-
if (!tcp_is_cwnd_limited(sk, in_flight))
324+
if (!tcp_is_cwnd_limited(sk))
325325
return;
326326

327327
/* In "safe" area, increase. */

net/ipv4/tcp_cubic.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
304304
ca->cnt = 1;
305305
}
306306

307-
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
308-
u32 in_flight)
307+
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
309308
{
310309
struct tcp_sock *tp = tcp_sk(sk);
311310
struct bictcp *ca = inet_csk_ca(sk);
312311

313-
if (!tcp_is_cwnd_limited(sk, in_flight))
312+
if (!tcp_is_cwnd_limited(sk))
314313
return;
315314

316315
if (tp->snd_cwnd <= tp->snd_ssthresh) {

net/ipv4/tcp_highspeed.c

+2-2
Original file line number | Diff line number | Diff line change
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
109109
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
110110
}
111111

112-
static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
112+
static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
113113
{
114114
struct tcp_sock *tp = tcp_sk(sk);
115115
struct hstcp *ca = inet_csk_ca(sk);
116116

117-
if (!tcp_is_cwnd_limited(sk, in_flight))
117+
if (!tcp_is_cwnd_limited(sk))
118118
return;
119119

120120
if (tp->snd_cwnd <= tp->snd_ssthresh)

net/ipv4/tcp_htcp.c

+2-2
Original file line number | Diff line number | Diff line change
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
227227
return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
228228
}
229229

230-
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
230+
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
231231
{
232232
struct tcp_sock *tp = tcp_sk(sk);
233233
struct htcp *ca = inet_csk_ca(sk);
234234

235-
if (!tcp_is_cwnd_limited(sk, in_flight))
235+
if (!tcp_is_cwnd_limited(sk))
236236
return;
237237

238238
if (tp->snd_cwnd <= tp->snd_ssthresh)

net/ipv4/tcp_hybla.c

+3-4
Original file line number | Diff line number | Diff line change
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
8787
* o Give cwnd a new value based on the model proposed
8888
* o remember increments <1
8989
*/
90-
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
91-
u32 in_flight)
90+
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
9291
{
9392
struct tcp_sock *tp = tcp_sk(sk);
9493
struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
101100
ca->minrtt_us = tp->srtt_us;
102101
}
103102

104-
if (!tcp_is_cwnd_limited(sk, in_flight))
103+
if (!tcp_is_cwnd_limited(sk))
105104
return;
106105

107106
if (!ca->hybla_en) {
108-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
107+
tcp_reno_cong_avoid(sk, ack, acked);
109108
return;
110109
}
111110

net/ipv4/tcp_illinois.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
255255
/*
256256
* Increase window in response to successful acknowledgment.
257257
*/
258-
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
259-
u32 in_flight)
258+
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
260259
{
261260
struct tcp_sock *tp = tcp_sk(sk);
262261
struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
265264
update_params(sk);
266265

267266
/* RFC2861 only increase cwnd if fully utilized */
268-
if (!tcp_is_cwnd_limited(sk, in_flight))
267+
if (!tcp_is_cwnd_limited(sk))
269268
return;
270269

271270
/* In slow start */

net/ipv4/tcp_input.c

+4-5
Original file line number | Diff line number | Diff line change
@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
29382938
tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
29392939
}
29402940

2941-
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
2941+
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
29422942
{
29432943
const struct inet_connection_sock *icsk = inet_csk(sk);
2944-
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
2944+
2945+
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
29452946
tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
29462947
}
29472948

@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
33643365
u32 ack_seq = TCP_SKB_CB(skb)->seq;
33653366
u32 ack = TCP_SKB_CB(skb)->ack_seq;
33663367
bool is_dupack = false;
3367-
u32 prior_in_flight;
33683368
u32 prior_fackets;
33693369
int prior_packets = tp->packets_out;
33703370
const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
33973397
flag |= FLAG_SND_UNA_ADVANCED;
33983398

33993399
prior_fackets = tp->fackets_out;
3400-
prior_in_flight = tcp_packets_in_flight(tp);
34013400

34023401
/* ts_recent update must be made after we are sure that the packet
34033402
* is in window.
@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
34523451

34533452
/* Advance cwnd if state allows */
34543453
if (tcp_may_raise_cwnd(sk, flag))
3455-
tcp_cong_avoid(sk, ack, acked, prior_in_flight);
3454+
tcp_cong_avoid(sk, ack, acked);
34563455

34573456
if (tcp_ack_is_dubious(sk, flag)) {
34583457
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));

net/ipv4/tcp_lp.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
115115
* Will only call newReno CA when away from inference.
116116
* From TCP-LP's paper, this will be handled in additive increasement.
117117
*/
118-
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
119-
u32 in_flight)
118+
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
120119
{
121120
struct lp *lp = inet_csk_ca(sk);
122121

123122
if (!(lp->flag & LP_WITHIN_INF))
124-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
123+
tcp_reno_cong_avoid(sk, ack, acked);
125124
}
126125

127126
/**

net/ipv4/tcp_output.c

+1-1
Original file line number | Diff line number | Diff line change
@@ -1408,7 +1408,7 @@ static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs)
14081408

14091409
tp->lsnd_pending = tp->packets_out + unsent_segs;
14101410

1411-
if (tcp_is_cwnd_limited(sk, 0)) {
1411+
if (tcp_is_cwnd_limited(sk)) {
14121412
/* Network is feed fully. */
14131413
tp->snd_cwnd_used = 0;
14141414
tp->snd_cwnd_stamp = tcp_time_stamp;

net/ipv4/tcp_scalable.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -15,12 +15,11 @@
1515
#define TCP_SCALABLE_AI_CNT 50U
1616
#define TCP_SCALABLE_MD_SCALE 3
1717

18-
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
19-
u32 in_flight)
18+
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2019
{
2120
struct tcp_sock *tp = tcp_sk(sk);
2221

23-
if (!tcp_is_cwnd_limited(sk, in_flight))
22+
if (!tcp_is_cwnd_limited(sk))
2423
return;
2524

2625
if (tp->snd_cwnd <= tp->snd_ssthresh)

net/ipv4/tcp_vegas.c

+3-4
Original file line number | Diff line number | Diff line change
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
163163
return min(tp->snd_ssthresh, tp->snd_cwnd-1);
164164
}
165165

166-
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
167-
u32 in_flight)
166+
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
168167
{
169168
struct tcp_sock *tp = tcp_sk(sk);
170169
struct vegas *vegas = inet_csk_ca(sk);
171170

172171
if (!vegas->doing_vegas_now) {
173-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
172+
tcp_reno_cong_avoid(sk, ack, acked);
174173
return;
175174
}
176175

@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
195194
/* We don't have enough RTT samples to do the Vegas
196195
* calculation, so we'll behave like Reno.
197196
*/
198-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
197+
tcp_reno_cong_avoid(sk, ack, acked);
199198
} else {
200199
u32 rtt, diff;
201200
u64 target_cwnd;

net/ipv4/tcp_veno.c

+4-5
Original file line number | Diff line number | Diff line change
@@ -114,27 +114,26 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
114114
tcp_veno_init(sk);
115115
}
116116

117-
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
118-
u32 in_flight)
117+
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
119118
{
120119
struct tcp_sock *tp = tcp_sk(sk);
121120
struct veno *veno = inet_csk_ca(sk);
122121

123122
if (!veno->doing_veno_now) {
124-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
123+
tcp_reno_cong_avoid(sk, ack, acked);
125124
return;
126125
}
127126

128127
/* limited by applications */
129-
if (!tcp_is_cwnd_limited(sk, in_flight))
128+
if (!tcp_is_cwnd_limited(sk))
130129
return;
131130

132131
/* We do the Veno calculations only if we got enough rtt samples */
133132
if (veno->cntrtt <= 2) {
134133
/* We don't have enough rtt samples to do the Veno
135134
* calculation, so we'll behave like Reno.
136135
*/
137-
tcp_reno_cong_avoid(sk, ack, acked, in_flight);
136+
tcp_reno_cong_avoid(sk, ack, acked);
138137
} else {
139138
u64 target_cwnd;
140139
u32 rtt;

net/ipv4/tcp_yeah.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
6969
tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
7070
}
7171

72-
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
73-
u32 in_flight)
72+
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
7473
{
7574
struct tcp_sock *tp = tcp_sk(sk);
7675
struct yeah *yeah = inet_csk_ca(sk);
7776

78-
if (!tcp_is_cwnd_limited(sk, in_flight))
77+
if (!tcp_is_cwnd_limited(sk))
7978
return;
8079

8180
if (tp->snd_cwnd <= tp->snd_ssthresh)

0 commit comments

Comments (0)