Skip to content

Commit 83afb36

Browse files
committed
Merge branch 'tcp-tso-defer-improvements'
Eric Dumazet says:

====================
tcp: tso defer improvements

This series makes tcp_tso_should_defer() a bit smarter:

1) MSG_EOR gives a hint to TCP to not defer some skbs.
2) Second patch takes into account that head tstamp can be in the future.
3) Third patch uses existing high resolution state variables to have a more precise heuristic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents 5e13a0d + a682850 commit 83afb36

File tree

1 file changed

+13
-5
lines changed

1 file changed

+13
-5
lines changed

net/ipv4/tcp_output.c

Lines changed: 13 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
19071907
bool *is_cwnd_limited, u32 max_segs)
19081908
{
19091909
const struct inet_connection_sock *icsk = inet_csk(sk);
1910-
u32 age, send_win, cong_win, limit, in_flight;
1910+
u32 send_win, cong_win, limit, in_flight;
19111911
struct tcp_sock *tp = tcp_sk(sk);
19121912
struct sk_buff *head;
19131913
int win_divisor;
1914+
s64 delta;
19141915

19151916
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
19161917
goto send_now;
@@ -1919,9 +1920,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
19191920
goto send_now;
19201921

19211922
/* Avoid bursty behavior by allowing defer
1922-
* only if the last write was recent.
1923+
* only if the last write was recent (1 ms).
1924+
* Note that tp->tcp_wstamp_ns can be in the future if we have
1925+
* packets waiting in a qdisc or device for EDT delivery.
19231926
*/
1924-
if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
1927+
delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1928+
if (delta > 0)
19251929
goto send_now;
19261930

19271931
in_flight = tcp_packets_in_flight(tp);
@@ -1944,6 +1948,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
19441948
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
19451949
goto send_now;
19461950

1951+
/* If this packet won't get more data, do not wait. */
1952+
if (TCP_SKB_CB(skb)->eor)
1953+
goto send_now;
1954+
19471955
win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
19481956
if (win_divisor) {
19491957
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1968,9 +1976,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
19681976
head = tcp_rtx_queue_head(sk);
19691977
if (!head)
19701978
goto send_now;
1971-
age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
1979+
delta = tp->tcp_clock_cache - head->tstamp;
19721980
/* If next ACK is likely to come too late (half srtt), do not defer */
1973-
if (age < (tp->srtt_us >> 4))
1981+
if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
19741982
goto send_now;
19751983

19761984
/* Ok, it looks like it is advisable to defer. */

0 commit comments

Comments
 (0)