From 405e3d757f3cb48c2e1f4c6f042b58b246b41a69 Mon Sep 17 00:00:00 2001
From: Robert Lubos
Date: Thu, 11 Aug 2022 16:46:49 +0200
Subject: [PATCH] net: tcp: Add exponential backoff for ZWP probing

Persist timer should implement exponential backoff, as per RFC 1122:

  The transmitting host SHOULD send the first zero-window probe when a
  zero window has existed for the retransmission timeout period (see
  Section 4.2.2.15), and SHOULD increase exponentially the interval
  between successive probes.

Implement this by following Linux behaviour and simply doubling the
timeout for each probe transmission. Additionally, prevent resetting
the persist timer in case an acknowledgment with zero window size is
received while the timer is already running.

Signed-off-by: Robert Lubos
---
 subsys/net/ip/tcp.c         | 23 ++++++++++++++++++++---
 subsys/net/ip/tcp_private.h |  1 +
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/subsys/net/ip/tcp.c b/subsys/net/ip/tcp.c
index 7be0e031319006..a5218a105e1bbe 100644
--- a/subsys/net/ip/tcp.c
+++ b/subsys/net/ip/tcp.c
@@ -30,6 +30,7 @@ LOG_MODULE_REGISTER(net_tcp, CONFIG_NET_TCP_LOG_LEVEL);
 #define ACK_TIMEOUT K_MSEC(ACK_TIMEOUT_MS)
 #define FIN_TIMEOUT K_MSEC(tcp_fin_timeout_ms)
 #define ACK_DELAY K_MSEC(100)
+#define ZWP_MAX_DELAY_MS 120000
 
 static int tcp_rto = CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT;
 static int tcp_retries = CONFIG_NET_TCP_RETRY_COUNT;
@@ -1346,8 +1347,20 @@ static void tcp_send_zwp(struct k_work *work)
 	tcp_derive_rto(conn);
 
 	if (conn->send_win == 0) {
+		uint64_t timeout;
+
+		/* Make sure the retry counter does not overflow. */
+		if (conn->zwp_retries < UINT8_MAX) {
+			conn->zwp_retries++;
+		}
+
+		timeout = TCP_RTO_MS << conn->zwp_retries;
+		if (timeout == 0 || timeout > ZWP_MAX_DELAY_MS) {
+			timeout = ZWP_MAX_DELAY_MS;
+		}
+
 		(void)k_work_reschedule_for_queue(
-			&tcp_work_q, &conn->persist_timer, K_MSEC(TCP_RTO_MS));
+			&tcp_work_q, &conn->persist_timer, K_MSEC(timeout));
 	}
 
 	k_mutex_unlock(&conn->lock);
@@ -2008,8 +2021,12 @@ static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt)
 		}
 
 		if (conn->send_win == 0) {
-			(void)k_work_reschedule_for_queue(
-				&tcp_work_q, &conn->persist_timer, K_MSEC(TCP_RTO_MS));
+			if (!k_work_delayable_is_pending(&conn->persist_timer)) {
+				conn->zwp_retries = 0;
+				(void)k_work_reschedule_for_queue(
+					&tcp_work_q, &conn->persist_timer,
+					K_MSEC(TCP_RTO_MS));
+			}
 		} else {
 			(void)k_work_cancel_delayable(&conn->persist_timer);
 		}
diff --git a/subsys/net/ip/tcp_private.h b/subsys/net/ip/tcp_private.h
index d0b15dfc73d3ee..9768a37b9eff65 100644
--- a/subsys/net/ip/tcp_private.h
+++ b/subsys/net/ip/tcp_private.h
@@ -273,6 +273,7 @@ struct tcp { /* TCP connection */
 	uint16_t rto;
 #endif
 	uint8_t send_data_retries;
+	uint8_t zwp_retries;
 	bool in_retransmission : 1;
 	bool in_connect : 1;
 	bool in_close : 1;
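
Note (not part of the patch): the backoff added in tcp_send_zwp() reduces to a capped left shift of the base RTO. The standalone sketch below mirrors that computation so the resulting probe intervals are easy to inspect; the zwp_backoff_ms() helper, the example rto_ms value of 200 ms, and the main() driver are illustrative assumptions and do not exist in the Zephyr sources.

```c
#include <stdint.h>
#include <stdio.h>

#define ZWP_MAX_DELAY_MS 120000 /* cap used by the patch: 2 minutes */

/* Hypothetical helper mirroring the patched logic: double the probe
 * interval on every retry (rto << retries) and clamp the result to
 * ZWP_MAX_DELAY_MS.
 */
static uint64_t zwp_backoff_ms(uint32_t rto_ms, uint8_t retries)
{
	uint64_t timeout;

	/* Shifting a 64-bit value by >= 64 is undefined behaviour in C;
	 * by that point the result is far beyond the cap anyway.
	 */
	if (retries >= 32) {
		return ZWP_MAX_DELAY_MS;
	}

	timeout = (uint64_t)rto_ms << retries;
	if (timeout == 0 || timeout > ZWP_MAX_DELAY_MS) {
		timeout = ZWP_MAX_DELAY_MS;
	}

	return timeout;
}

int main(void)
{
	uint32_t rto_ms = 200; /* example initial RTO, not the Kconfig default */

	/* With rto_ms = 200 the probe intervals become 400, 800, 1600, ...
	 * and stay capped at 120000 ms from retry 10 onwards.
	 */
	for (uint8_t retries = 1; retries <= 12; retries++) {
		printf("retry %2u -> %llu ms\n", retries,
		       (unsigned long long)zwp_backoff_ms(rto_ms, retries));
	}

	return 0;
}
```

Because conn->zwp_retries is incremented before the shift in the patch, the first rescheduled probe already waits roughly twice the RTO, matching the RFC 1122 requirement that the interval grow exponentially after the initial probe.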