Skip to content

Commit 24a8fda

Browse files
chunkeey authored and linvjw committed
mac80211: serialize rx path workers
This patch addresses the issue of serialization between the main rx path and various reorder release timers. <http://www.spinics.net/lists/linux-wireless/msg57214.html> It converts the previously local "frames" queue into a global rx queue [rx_skb_queue]. This way, everyone (be it the main rx-path or some reorder release timeout) can add frames to it. Only one active rx handler worker [ieee80211_rx_handlers] is needed. All other threads which have lost the race of "running_rx_handler" can now simply "return", knowing that the thread who had the "edge" will also take care of their workload. Signed-off-by: Christian Lamparter <chunkeey@googlemail.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
1 parent 1186488 commit 24a8fda

File tree

3 files changed

+49
-43
lines changed

3 files changed

+49
-43
lines changed

net/mac80211/ieee80211_i.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -761,6 +761,15 @@ struct ieee80211_local {
761761
struct sk_buff_head skb_queue;
762762
struct sk_buff_head skb_queue_unreliable;
763763

764+
/*
765+
* Internal FIFO queue which is shared between multiple rx path
766+
* stages. Its main task is to provide a serialization mechanism,
767+
* so all rx handlers can enjoy having exclusive access to their
768+
* private data structures.
769+
*/
770+
struct sk_buff_head rx_skb_queue;
771+
bool running_rx_handler; /* protected by rx_skb_queue.lock */
772+
764773
/* Station data */
765774
/*
766775
* The mutex only protects the list and counter,

net/mac80211/main.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -569,6 +569,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
569569
spin_lock_init(&local->filter_lock);
570570
spin_lock_init(&local->queue_stop_reason_lock);
571571

572+
skb_queue_head_init(&local->rx_skb_queue);
573+
572574
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
573575

574576
ieee80211_work_init(local);
@@ -912,6 +914,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
912914
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
913915
skb_queue_purge(&local->skb_queue);
914916
skb_queue_purge(&local->skb_queue_unreliable);
917+
skb_queue_purge(&local->rx_skb_queue);
915918

916919
destroy_workqueue(local->workqueue);
917920
wiphy_unregister(local->hw.wiphy);

net/mac80211/rx.c

Lines changed: 37 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -533,9 +533,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
533533

534534
static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
535535
struct tid_ampdu_rx *tid_agg_rx,
536-
int index,
537-
struct sk_buff_head *frames)
536+
int index)
538537
{
538+
struct ieee80211_local *local = hw_to_local(hw);
539539
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
540540
struct ieee80211_rx_status *status;
541541

@@ -549,16 +549,15 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
549549
tid_agg_rx->reorder_buf[index] = NULL;
550550
status = IEEE80211_SKB_RXCB(skb);
551551
status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
552-
__skb_queue_tail(frames, skb);
552+
skb_queue_tail(&local->rx_skb_queue, skb);
553553

554554
no_frame:
555555
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
556556
}
557557

558558
static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
559559
struct tid_ampdu_rx *tid_agg_rx,
560-
u16 head_seq_num,
561-
struct sk_buff_head *frames)
560+
u16 head_seq_num)
562561
{
563562
int index;
564563

@@ -567,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
567566
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
568567
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
569568
tid_agg_rx->buf_size;
570-
ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
569+
ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
571570
}
572571
}
573572

@@ -583,8 +582,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
583582
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
584583

585584
static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
586-
struct tid_ampdu_rx *tid_agg_rx,
587-
struct sk_buff_head *frames)
585+
struct tid_ampdu_rx *tid_agg_rx)
588586
{
589587
int index, j;
590588

@@ -615,8 +613,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
615613
wiphy_debug(hw->wiphy,
616614
"release an RX reorder frame due to timeout on earlier frames\n");
617615
#endif
618-
ieee80211_release_reorder_frame(hw, tid_agg_rx,
619-
j, frames);
616+
ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
620617

621618
/*
622619
* Increment the head seq# also for the skipped slots.
@@ -626,7 +623,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
626623
skipped = 0;
627624
}
628625
} else while (tid_agg_rx->reorder_buf[index]) {
629-
ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
626+
ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
630627
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
631628
tid_agg_rx->buf_size;
632629
}
@@ -682,8 +679,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
682679
*/
683680
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
684681
struct tid_ampdu_rx *tid_agg_rx,
685-
struct sk_buff *skb,
686-
struct sk_buff_head *frames)
682+
struct sk_buff *skb)
687683
{
688684
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
689685
u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -710,8 +706,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
710706
if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
711707
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
712708
/* release stored frames up to new head to stack */
713-
ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
714-
frames);
709+
ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
715710
}
716711

717712
/* Now the new frame is always in the range of the reordering buffer */
@@ -739,7 +734,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
739734
tid_agg_rx->reorder_buf[index] = skb;
740735
tid_agg_rx->reorder_time[index] = jiffies;
741736
tid_agg_rx->stored_mpdu_num++;
742-
ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
737+
ieee80211_sta_reorder_release(hw, tid_agg_rx);
743738

744739
out:
745740
spin_unlock(&tid_agg_rx->reorder_lock);
@@ -750,8 +745,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
750745
* Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
751746
* true if the MPDU was buffered, false if it should be processed.
752747
*/
753-
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
754-
struct sk_buff_head *frames)
748+
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
755749
{
756750
struct sk_buff *skb = rx->skb;
757751
struct ieee80211_local *local = rx->local;
@@ -806,11 +800,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
806800
* sure that we cannot get to it any more before doing
807801
* anything with it.
808802
*/
809-
if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
803+
if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
810804
return;
811805

812806
dont_reorder:
813-
__skb_queue_tail(frames, skb);
807+
skb_queue_tail(&local->rx_skb_queue, skb);
814808
}
815809

816810
static ieee80211_rx_result debug_noinline
@@ -1931,7 +1925,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
19311925
}
19321926

19331927
static ieee80211_rx_result debug_noinline
1934-
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1928+
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
19351929
{
19361930
struct ieee80211_local *local = rx->local;
19371931
struct ieee80211_hw *hw = &local->hw;
@@ -1971,8 +1965,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
19711965

19721966
spin_lock(&tid_agg_rx->reorder_lock);
19731967
/* release stored frames up to start of BAR */
1974-
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1975-
frames);
1968+
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
19761969
spin_unlock(&tid_agg_rx->reorder_lock);
19771970

19781971
kfree_skb(skb);
@@ -2489,8 +2482,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
24892482
}
24902483
}
24912484

2492-
static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2493-
struct sk_buff_head *frames)
2485+
static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
24942486
{
24952487
ieee80211_rx_result res = RX_DROP_MONITOR;
24962488
struct sk_buff *skb;
@@ -2502,7 +2494,15 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
25022494
goto rxh_next; \
25032495
} while (0);
25042496

2505-
while ((skb = __skb_dequeue(frames))) {
2497+
spin_lock(&rx->local->rx_skb_queue.lock);
2498+
if (rx->local->running_rx_handler)
2499+
goto unlock;
2500+
2501+
rx->local->running_rx_handler = true;
2502+
2503+
while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2504+
spin_unlock(&rx->local->rx_skb_queue.lock);
2505+
25062506
/*
25072507
* all the other fields are valid across frames
25082508
* that belong to an aMPDU since they are on the
@@ -2525,12 +2525,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
25252525
CALL_RXH(ieee80211_rx_h_mesh_fwding);
25262526
#endif
25272527
CALL_RXH(ieee80211_rx_h_data)
2528-
2529-
/* special treatment -- needs the queue */
2530-
res = ieee80211_rx_h_ctrl(rx, frames);
2531-
if (res != RX_CONTINUE)
2532-
goto rxh_next;
2533-
2528+
CALL_RXH(ieee80211_rx_h_ctrl);
25342529
CALL_RXH(ieee80211_rx_h_mgmt_check)
25352530
CALL_RXH(ieee80211_rx_h_action)
25362531
CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2539,18 +2534,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
25392534

25402535
rxh_next:
25412536
ieee80211_rx_handlers_result(rx, res);
2542-
2537+
spin_lock(&rx->local->rx_skb_queue.lock);
25432538
#undef CALL_RXH
25442539
}
2540+
2541+
rx->local->running_rx_handler = false;
2542+
2543+
unlock:
2544+
spin_unlock(&rx->local->rx_skb_queue.lock);
25452545
}
25462546

25472547
static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
25482548
{
2549-
struct sk_buff_head reorder_release;
25502549
ieee80211_rx_result res = RX_DROP_MONITOR;
25512550

2552-
__skb_queue_head_init(&reorder_release);
2553-
25542551
#define CALL_RXH(rxh) \
25552552
do { \
25562553
res = rxh(rx); \
@@ -2561,9 +2558,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
25612558
CALL_RXH(ieee80211_rx_h_passive_scan)
25622559
CALL_RXH(ieee80211_rx_h_check)
25632560

2564-
ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2561+
ieee80211_rx_reorder_ampdu(rx);
25652562

2566-
ieee80211_rx_handlers(rx, &reorder_release);
2563+
ieee80211_rx_handlers(rx);
25672564
return;
25682565

25692566
rxh_next:
@@ -2578,7 +2575,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
25782575
*/
25792576
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
25802577
{
2581-
struct sk_buff_head frames;
25822578
struct ieee80211_rx_data rx = {
25832579
.sta = sta,
25842580
.sdata = sta->sdata,
@@ -2591,13 +2587,11 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
25912587
if (!tid_agg_rx)
25922588
return;
25932589

2594-
__skb_queue_head_init(&frames);
2595-
25962590
spin_lock(&tid_agg_rx->reorder_lock);
2597-
ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
2591+
ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
25982592
spin_unlock(&tid_agg_rx->reorder_lock);
25992593

2600-
ieee80211_rx_handlers(&rx, &frames);
2594+
ieee80211_rx_handlers(&rx);
26012595
}
26022596

26032597
/* main receive path */

0 commit comments

Comments
 (0)