@@ -2315,7 +2315,26 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 			      unsigned int flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	bool need_push, dispose_it;
+	bool dispose_it, need_push = false;
+
+	/* If the first subflow moved to a close state before accept, e.g. due
+	 * to an incoming reset, mptcp either:
+	 * - if either the subflow or the msk are dead, destroy the context
+	 *   (the subflow socket is deleted by inet_child_forget) and the msk
+	 * - otherwise do nothing at the moment and take action at accept and/or
+	 *   listener shutdown - user-space must be able to accept() the closed
+	 *   socket.
+	 */
+	if (msk->in_accept_queue && msk->first == ssk) {
+		if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
+			return;
+
+		/* ensure later check in mptcp_worker() will dispose the msk */
+		sock_set_flag(sk, SOCK_DEAD);
+		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+		mptcp_subflow_drop_ctx(ssk);
+		goto out_release;
+	}
 
 	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
 	if (dispose_it)
@@ -2351,18 +2370,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 	if (!inet_csk(ssk)->icsk_ulp_ops) {
 		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
 		kfree_rcu(subflow, rcu);
-	} else if (msk->in_accept_queue && msk->first == ssk) {
-		/* if the first subflow moved to a close state, e.g. due to
-		 * incoming reset and we reach here before inet_child_forget()
-		 * the TCP stack could later try to close it via
-		 * inet_csk_listen_stop(), or deliver it to the user space via
-		 * accept().
-		 * We can't delete the subflow - or risk a double free - nor let
-		 * the msk survive - or will be leaked in the non accept scenario:
-		 * fallback and let TCP cope with the subflow cleanup.
-		 */
-		WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
-		mptcp_subflow_drop_ctx(ssk);
 	} else {
 		/* otherwise tcp will dispose of the ssk and subflow ctx */
 		if (ssk->sk_state == TCP_LISTEN) {
@@ -2377,6 +2384,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		/* close acquired an extra ref */
 		__sock_put(ssk);
 	}
+
+out_release:
 	release_sock(ssk);
 
 	sock_put(ssk);
@@ -2431,21 +2440,14 @@ static void __mptcp_close_subflow(struct sock *sk)
 		mptcp_close_ssk(sk, ssk, subflow);
 	}
 
-	/* if the MPC subflow has been closed before the msk is accepted,
-	 * msk will never be accept-ed, close it now
-	 */
-	if (!msk->first && msk->in_accept_queue) {
-		sock_set_flag(sk, SOCK_DEAD);
-		inet_sk_state_store(sk, TCP_CLOSE);
-	}
 }
 
-static bool mptcp_check_close_timeout(const struct sock *sk)
+static bool mptcp_should_close(const struct sock *sk)
 {
 	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
 	struct mptcp_subflow_context *subflow;
 
-	if (delta >= TCP_TIMEWAIT_LEN)
+	if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
 		return true;
 
 	/* if all subflows are in closed status don't bother with additional
@@ -2653,7 +2655,7 @@ static void mptcp_worker(struct work_struct *work)
 	 * even if it is orphaned and in FIN_WAIT2 state
 	 */
 	if (sock_flag(sk, SOCK_DEAD)) {
-		if (mptcp_check_close_timeout(sk)) {
+		if (mptcp_should_close(sk)) {
 			inet_sk_state_store(sk, TCP_CLOSE);
 			mptcp_do_fastclose(sk);
 		}
@@ -2899,6 +2901,14 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sock_put(sk);
 }
 
+void __mptcp_unaccepted_force_close(struct sock *sk)
+{
+	sock_set_flag(sk, SOCK_DEAD);
+	inet_sk_state_store(sk, TCP_CLOSE);
+	mptcp_do_fastclose(sk);
+	__mptcp_destroy_sock(sk);
+}
+
 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
 {
 	/* Concurrent splices from sk_receive_queue into receive_queue will
@@ -3737,6 +3747,18 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 			if (!ssk->sk_socket)
 				mptcp_sock_graft(ssk, newsock);
 		}
+
+		/* Do late cleanup for the first subflow as necessary. Also
+		 * deal with bad peers not doing a complete shutdown.
+		 */
+		if (msk->first &&
+		    unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
+			__mptcp_close_ssk(newsk, msk->first,
+					  mptcp_subflow_ctx(msk->first), 0);
+			if (unlikely(list_empty(&msk->conn_list)))
+				inet_sk_state_store(newsk, TCP_CLOSE);
+		}
+
 		release_sock(newsk);
 	}
 
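The comment in the first hunk pins down the contract this patch preserves: user space must be able to accept() a connection even after the peer has already reset the MPC subflow. Below is a minimal userspace sketch of that contract, assuming a kernel with CONFIG_MPTCP enabled; the port number, buffer size, and error handling are illustrative only, and the peer is assumed to connect and reset (e.g. SO_LINGER with a zero timeout plus close()) before the server reaches accept().

/* Sketch: accept() on an MPTCP listener whose pending connection was
 * reset before being accepted. With this fix the socket is still
 * delivered to user space in TCP_CLOSE state and the subsequent read()
 * fails cleanly, instead of the msk lingering unaccepted in the kernel.
 */
#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* value from linux/in.h, for older headers */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8685),	/* arbitrary test port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	char buf[64];
	int lfd, cfd;
	ssize_t n;

	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (lfd < 0 || bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(lfd, 1))
		return 1;

	/* the peer connects and resets here, before accept() runs */
	cfd = accept(lfd, NULL, NULL);
	if (cfd < 0)
		return 1;

	n = read(cfd, buf, sizeof(buf));	/* EOF or error on the dead socket */
	printf("read on reset connection: %zd\n", n);

	close(cfd);
	close(lfd);
	return 0;
}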
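__mptcp_unaccepted_force_close() is deliberately non-static, so the remaining cleanup point named in the first hunk's comment, listener shutdown, can be handled outside protocol.c. A hedged sketch of what such a caller might look like; the wrapper name is hypothetical, only the helper itself comes from this patch, and the locking is an assumption based on what the helper's callees (mptcp_do_fastclose(), __mptcp_destroy_sock()) run under elsewhere in this file.

/* Illustrative caller, not part of this hunk: an msk that was queued
 * for accept() but will never be delivered must be force-closed under
 * its socket lock, otherwise it leaks when the listener goes away.
 */
static void example_force_close_unaccepted(struct sock *msk_sk)
{
	lock_sock_nested(msk_sk, SINGLE_DEPTH_NESTING);
	__mptcp_unaccepted_force_close(msk_sk);
	release_sock(msk_sk);
}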