
Commit

mptcp: Need to save/restore/cleanup tsorted_anchor
The dst_entry in the skb is now overloaded with the tcp_tsorted_anchor.
Thus, we need to call the helper functions to save/restore the latter
before cloning or freeing an skb.

See also e208007 ("tcp: new list for sent but unacked skbs for RACK recovery")

Fixes: c61bc63 ("Merge tag 'v4.15-rc3' into mptcp_trunk")
Signed-off-by: Christoph Paasch <cpaasch@apple.com>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
cpaasch authored and matttbe committed Feb 9, 2018
1 parent 1da82c8 commit cdb1b92
Showing 2 changed files with 14 additions and 3 deletions.
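
For context: upstream commit e208007 unions the new tcp_tsorted_anchor over the skb's dst/destructor fields, so an skb sitting on the tsorted list carries list pointers where skb_clone()/pskb_copy() would expect a dst reference and take a refcount on it. The save/restore helpers used throughout this commit zero _skb_refdst across the copy so the list pointers are never misread as a dst. The sketch below paraphrases the definitions from include/linux/skbuff.h and include/net/tcp.h of that era; line-for-line details may differ between trees.

	/* In struct sk_buff: the tsorted anchor overlays the dst entry
	 * and the destructor, saving space at the cost of aliasing.
	 */
	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

	/* Temporarily clear the aliased dst field around an operation
	 * (here: pskb_copy_for_clone()) that would otherwise take a
	 * reference on the "dst", then put the saved value back.
	 */
	#define tcp_skb_tsorted_save(skb) {		\
		unsigned long _save = skb->_skb_refdst;	\
		skb->_skb_refdst = 0UL;

	#define tcp_skb_tsorted_restore(skb)		\
		skb->_skb_refdst = _save;		\
	}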
net/mptcp/mptcp_ctrl.c (3 changes: 2 additions & 1 deletion)
@@ -1205,6 +1205,7 @@ static int mptcp_alloc_mpcb(struct sock *meta_sk, __u64 remote_key,
 	skb_queue_head_init(&mpcb->reinject_queue);
 	master_tp->out_of_order_queue = RB_ROOT;
 	INIT_LIST_HEAD(&master_tp->tsq_node);
+	INIT_LIST_HEAD(&master_tp->tsorted_sent_queue);
 
 	master_sk->sk_tsq_flags = 0;
 
@@ -1551,7 +1552,7 @@ static int mptcp_sub_send_fin(struct sock *sk)
 	if (!skb)
 		return 1;
 
-	/* Reserve space for headers and prepare control bits. */
+	INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
 	skb_reserve(skb, MAX_TCP_HEADER);
 	/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 	tcp_init_nondata_skb(skb, tp->write_seq,
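
A note on the two hunks above: the master tcp_sock is set up by mptcp_alloc_mpcb() rather than the regular TCP socket init path, and the FIN skb in mptcp_sub_send_fin() is allocated directly, so both must initialize the list heads that e208007 expects to be valid, presumably mirroring the INIT_LIST_HEAD() calls that upstream commit added to the plain TCP paths.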
net/mptcp/mptcp_output.c (14 changes: 12 additions & 2 deletions)
@@ -134,7 +134,9 @@ static void __mptcp_reinject_data(struct sk_buff *orig_skb, struct sock *meta_sk
 		 * will be changed when it's going to be reinjected on another
 		 * subflow.
 		 */
-		skb = pskb_copy_for_clone(orig_skb, GFP_ATOMIC);
+		tcp_skb_tsorted_save(orig_skb) {
+			skb = pskb_copy_for_clone(orig_skb, GFP_ATOMIC);
+		} tcp_skb_tsorted_restore(orig_skb);
 	} else {
 		if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
 			__skb_unlink(orig_skb, &sk->sk_write_queue);
@@ -155,6 +157,9 @@ static void __mptcp_reinject_data(struct sk_buff *orig_skb, struct sock *meta_sk
 
 	skb->sk = meta_sk;
 
+	/* Make sure that this list is clean */
+	tcp_skb_tsorted_anchor_cleanup(skb);
+
 	/* Reset subflow-specific TCP control-data */
 	TCP_SKB_CB(skb)->sacked = 0;
 	TCP_SKB_CB(skb)->tcp_flags &= (TCPHDR_ACK | TCPHDR_PSH);
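
The tcp_skb_tsorted_anchor_cleanup() call above is needed because the reinjected skb was unlinked from a subflow queue and its anchor bytes may still hold a dst pointer or destructor from that earlier life. The upstream helper (roughly, from include/net/tcp.h as added by e208007; exact details may differ between trees) simply drops both so the field can be reused as a list head:

	static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
	{
		skb->destructor = NULL;
		skb->_skb_refdst = 0UL;
	}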
@@ -497,7 +502,9 @@ static bool mptcp_skb_entail(struct sock *sk, struct sk_buff *skb, int reinject)
 	TCP_SKB_CB(skb)->mptcp_flags |= (mpcb->snd_hiseq_index ?
 					 MPTCPHDR_SEQ64_INDEX : 0);
 
-	subskb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+	tcp_skb_tsorted_save(skb) {
+		subskb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+	} tcp_skb_tsorted_restore(skb);
 	if (!subskb)
 		return false;
 
@@ -547,6 +554,9 @@ static bool mptcp_skb_entail(struct sock *sk, struct sk_buff *skb, int reinject)
 	 * segment is not part of the subflow but on a meta-only-level.
 	 */
 	if (!mptcp_is_data_fin(subskb) || tcb->end_seq != tcb->seq) {
+		/* Make sure that this list is clean */
+		INIT_LIST_HEAD(&subskb->tcp_tsorted_anchor);
+
 		tcp_add_write_queue_tail(sk, subskb);
 		sk->sk_wmem_queued += subskb->truesize;
 		sk_mem_charge(sk, subskb->truesize);
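
Here the fresh copy gets INIT_LIST_HEAD() rather than the cleanup helper: because the copy was taken while tcp_skb_tsorted_save() had zeroed _skb_refdst, subskb's anchor bytes are all zero, which is not a valid empty list (a later list_del() on it would dereference NULL). INIT_LIST_HEAD() makes the node self-referential, roughly as defined in include/linux/list.h (some kernel versions use WRITE_ONCE() for the next pointer):

	static inline void INIT_LIST_HEAD(struct list_head *list)
	{
		list->next = list;
		list->prev = list;
	}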
