Skip to content

Commit 8270d9c

Browse files
D-Wythe authored and davem330 (David S. Miller) committed
net/smc: Limit backlog connections
The current implementation does not handle backlog semantics; one potential risk is that the server may be flooded by an unbounded number of connections, even if the client is SMC-incapable. This patch puts a limit on backlog connections. Referring to the TCP implementation, we divide SMC connections into two categories: 1. Half SMC connections, which include all connections that are TCP-established but not yet SMC-established. 2. Full SMC connections, which include all SMC-established connections. For half SMC connections, since every half SMC connection starts as TCP-established, we can achieve our goal by applying a limit before TCP establishment. Following the TCP implementation, this limit is based not only on the half SMC connections but also on the full connections, which also acts as a constraint on full SMC connections. For full SMC connections, although we know exactly where they start, it is quite hard to apply a limit before that point. The easiest way would be to block-wait before receiving the SMC confirm CLC message, but that code runs under the protection of smc_server_lgr_pending, a global lock, which would make the limit apply to the entire host instead of a single listen socket. Another way would be to drop the full connections, but considering the cost of establishing SMC connections, we prefer to keep full SMC connections. Even so, a limit on full SMC connections still exists; see the notes about half SMC connections above. After this patch, the backlog connection limits are as follows. For SMC: 1. A client with SMC capability can make at most 2 * backlog full SMC connections, or 1 * backlog half SMC connections plus 1 * backlog full SMC connections. 2. A client without SMC capability can only make 1 * backlog half TCP connections and 1 * backlog full TCP connections. Signed-off-by: D. Wythe <alibuda@linux.alibaba.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 3079e34 commit 8270d9c

File tree

2 files changed

+50
-1
lines changed

2 files changed

+50
-1
lines changed

net/smc/af_smc.c

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,36 @@ static void smc_set_keepalive(struct sock *sk, int val)
7373
smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
7474
}
7575

76+
static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
77+
struct sk_buff *skb,
78+
struct request_sock *req,
79+
struct dst_entry *dst,
80+
struct request_sock *req_unhash,
81+
bool *own_req)
82+
{
83+
struct smc_sock *smc;
84+
85+
smc = smc_clcsock_user_data(sk);
86+
87+
if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
88+
sk->sk_max_ack_backlog)
89+
goto drop;
90+
91+
if (sk_acceptq_is_full(&smc->sk)) {
92+
NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
93+
goto drop;
94+
}
95+
96+
/* passthrough to original syn recv sock fct */
97+
return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
98+
own_req);
99+
100+
drop:
101+
dst_release(dst);
102+
tcp_listendrop(sk);
103+
return NULL;
104+
}
105+
76106
static struct smc_hashinfo smc_v4_hashinfo = {
77107
.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
78108
};
@@ -1595,6 +1625,9 @@ static void smc_listen_out(struct smc_sock *new_smc)
15951625
struct smc_sock *lsmc = new_smc->listen_smc;
15961626
struct sock *newsmcsk = &new_smc->sk;
15971627

1628+
if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
1629+
atomic_dec(&lsmc->queued_smc_hs);
1630+
15981631
if (lsmc->sk.sk_state == SMC_LISTEN) {
15991632
lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
16001633
smc_accept_enqueue(&lsmc->sk, newsmcsk);
@@ -2200,6 +2233,9 @@ static void smc_tcp_listen_work(struct work_struct *work)
22002233
if (!new_smc)
22012234
continue;
22022235

2236+
if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
2237+
atomic_inc(&lsmc->queued_smc_hs);
2238+
22032239
new_smc->listen_smc = lsmc;
22042240
new_smc->use_fallback = lsmc->use_fallback;
22052241
new_smc->fallback_rsn = lsmc->fallback_rsn;
@@ -2266,6 +2302,15 @@ static int smc_listen(struct socket *sock, int backlog)
22662302
smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
22672303
smc->clcsock->sk->sk_user_data =
22682304
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2305+
2306+
/* save original ops */
2307+
smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
2308+
2309+
smc->af_ops = *smc->ori_af_ops;
2310+
smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
2311+
2312+
inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
2313+
22692314
rc = kernel_listen(smc->clcsock, backlog);
22702315
if (rc) {
22712316
smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;

net/smc/smc.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,10 @@ struct smc_sock { /* smc sock container */
252252
bool use_fallback; /* fallback to tcp */
253253
int fallback_rsn; /* reason for fallback */
254254
u32 peer_diagnosis; /* decline reason from peer */
255+
atomic_t queued_smc_hs; /* queued smc handshakes */
256+
struct inet_connection_sock_af_ops af_ops;
257+
const struct inet_connection_sock_af_ops *ori_af_ops;
258+
/* original af ops */
255259
int sockopt_defer_accept;
256260
/* sockopt TCP_DEFER_ACCEPT
257261
* value
@@ -276,7 +280,7 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
276280
return (struct smc_sock *)sk;
277281
}
278282

279-
static inline struct smc_sock *smc_clcsock_user_data(struct sock *clcsk)
283+
static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
280284
{
281285
return (struct smc_sock *)
282286
((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);

0 commit comments

Comments
 (0)