@@ -675,12 +675,21 @@ void blk_queue_exit(struct request_queue *q)
 	percpu_ref_put(&q->q_usage_counter);
 }
 
+static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
+{
+	struct request_queue *q =
+		container_of(sev, struct request_queue, mq_pcpu_wake);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
 	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);
 
-	wake_up_all(&q->mq_freeze_wq);
+	if (wq_has_sleeper(&q->mq_freeze_wq))
+		swork_queue(&q->mq_pcpu_wake);
 }
 
 static void blk_rq_timed_out_timer(unsigned long data)
@@ -751,6 +760,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -3556,6 +3566,8 @@ int __init blk_dev_init(void)
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
+	BUG_ON(swork_get());
+
 	request_cachep = kmem_cache_create("blkdev_requests",
 			sizeof(struct request), 0, SLAB_PANIC, NULL);
 
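
For context, here is a minimal standalone sketch of the same deferral pattern the hunks above apply: the release callback may fire in atomic context, where wake_up_all() is unsafe on PREEMPT_RT because it takes a sleeping lock, so the wakeup is handed off to the swork kthread, and wq_has_sleeper() skips the hand-off when nobody is waiting. This assumes an RT tree providing the swork API (<linux/swork.h>); every demo_* name is hypothetical, invented for this illustration.

/* Hypothetical demo module; assumes PREEMPT_RT's <linux/swork.h>. */
#include <linux/module.h>
#include <linux/swork.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static struct swork_event demo_wake;

/* Runs in the swork kthread, i.e. preemptible process context. */
static void demo_wake_fn(struct swork_event *sev)
{
	wake_up_all(&demo_wq);
}

/*
 * Safe to call from atomic context (e.g. a percpu_ref release
 * callback): instead of waking the queue directly, defer the
 * wakeup to the swork kthread, and only when someone is waiting.
 */
static void demo_release(void)
{
	if (wq_has_sleeper(&demo_wq))
		swork_queue(&demo_wake);
}

static int __init demo_init(void)
{
	/* Bring up the shared swork kthread before first use. */
	int ret = swork_get();

	if (ret)
		return ret;

	INIT_SWORK(&demo_wake, demo_wake_fn);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_release();	/* exercise the deferred-wakeup path once */
	swork_put();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors the patch one-to-one: swork_get() in blk_dev_init() corresponds to demo_init(), INIT_SWORK() in blk_alloc_queue_node() to the handler registration, and the wq_has_sleeper()/swork_queue() pair in blk_queue_usage_counter_release() to demo_release().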