
Commit 1f14a09

Christoph Hellwig authored and axboe (Jens Axboe) committed
block: factor out a blk_try_enter_queue helper
Factor out the code to try to get q_usage_counter without blocking into
a separate helper, both to improve code readability and to prepare for
splitting bio_queue_enter from blk_queue_enter.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20210929071241.934472-3-hch@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent cc9c884 commit 1f14a09
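
For orientation, a minimal sketch of the enter/exit pairing that blk_queue_enter() keeps after this refactor (an illustration, not part of the patch; assumes q is a live struct request_queue):

	int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* -EBUSY if frozen or pm_only, -ENODEV if dying */
	/* ... issue work against q ... */
	blk_queue_exit(q);

Without BLK_MQ_REQ_NOWAIT, the call sleeps on q->mq_freeze_wq until the queue becomes usable instead of returning -EBUSY.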

File tree

1 file changed, 32 insertions(+), 28 deletions(-)

block/blk-core.c

@@ -416,6 +416,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,47 +449,27 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
 
-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
-
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
+
+	return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
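
A note on the refactor: blk_try_enter_queue() tests the failure condition, which is the De Morgan inverse of the success condition the old loop tested, so behavior is unchanged. A small standalone check of that equivalence (an illustration only, not kernel code; the plain booleans stand in for blk_queue_pm_only(q), the BLK_MQ_REQ_PM flag, and queue_rpm_status(q) == RPM_SUSPENDED):

	#include <assert.h>
	#include <stdbool.h>

	int main(void)
	{
		/* Enumerate all eight combinations of the three inputs. */
		for (int i = 0; i < 8; i++) {
			bool pm = i & 1, pm_only = i & 2, suspended = i & 4;

			/* old blk_queue_enter(): success condition */
			bool old_ok = (pm && !suspended) || !pm_only;

			/* new blk_try_enter_queue(): fail_put condition */
			bool new_fail = pm_only && (!pm || suspended);

			assert(old_ok == !new_fail);
		}
		return 0;
	}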
