block: make every barrier action optional
In all barrier sequences, the barrier write itself was always assumed
to be issued and thus didn't have a corresponding control flag.  This
patch adds QUEUE_ORDERED_DO_BAR and unifies action mask handling in
start_ordered() such that any barrier action can be skipped.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
htejun authored and Jens Axboe committed Dec 29, 2008
1 parent a738467 commit f671620
Showing 2 changed files with 29 additions and 19 deletions.
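
Before the hunks, a minimal stand-alone sketch (plain userspace C, not kernel code) of the pattern this patch gives start_ordered(): every barrier action sits behind a DO_* flag, and a skipped action is simply marked complete in the ordered sequence instead of having a request queued. The flag values mirror the QUEUE_ORDERED_DO_* constants in the include/linux/blkdev.h hunk below; the function and program structure are purely illustrative.

/* Stand-alone illustration only -- not kernel code.  DO_* values match the
 * QUEUE_ORDERED_DO_* flags defined in include/linux/blkdev.h below. */
#include <stdio.h>

enum {
	DO_PREFLUSH  = 0x10,	/* flush the cache before the barrier write */
	DO_BAR       = 0x20,	/* the barrier write itself (newly optional) */
	DO_POSTFLUSH = 0x40,	/* flush the cache after the barrier write */
	DO_FUA       = 0x80,	/* issue the barrier write with FUA */
};

/* Mirror of the unified start_ordered() handling: for each action, either
 * "queue" its request or mark that step of the sequence as already done.
 * The order follows the patch: requests are stacked at the queue head, so
 * the post-flush is handled first and the pre-flush last. */
static void start_ordered_sketch(unsigned ordered)
{
	if (ordered & DO_POSTFLUSH)
		printf("queue post-flush request\n");
	else
		printf("post-flush step marked complete\n");

	if (ordered & DO_BAR)
		printf("queue proxy barrier write%s\n",
		       (ordered & DO_FUA) ? " (FUA)" : "");
	else
		printf("barrier step marked complete\n");

	if (ordered & DO_PREFLUSH)
		printf("queue pre-flush request\n");
	else
		printf("pre-flush step marked complete\n");
}

int main(void)
{
	/* e.g. a hypothetical flush-only mode that skips the barrier write */
	start_ordered_sketch(DO_PREFLUSH | DO_POSTFLUSH);
	return 0;
}
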
41 changes: 24 additions & 17 deletions block/blk-barrier.c
@@ -158,19 +158,10 @@ static inline struct request *start_ordered(struct request_queue *q,
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
-	/*
-	 * Prep proxy barrier request.
-	 */
+	/* stash away the original request */
 	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
-	rq = &q->bar_rq;
-	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_DO_FUA)
-		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
-	rq->end_io = bar_end_io;
+	rq = NULL;
 
 	/*
 	 * Queue ordered sequence. As we stack them at the head, we
@@ -181,23 +172,39 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * there will be no data written between the pre and post flush.
 	 * Hence a single flush will suffice.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
+	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
+	    !blk_empty_barrier(q->orig_bar_rq)) {
 		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
-	else
+		rq = &q->post_flush_rq;
+	} else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+		rq = &q->bar_rq;
+
+		/* initialize proxy request and queue it */
+		blk_rq_init(q, rq);
+		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+			rq->cmd_flags |= REQ_RW;
+		if (q->ordered & QUEUE_ORDERED_DO_FUA)
+			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		rq->end_io = bar_end_io;
+
+		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	} else
+		q->ordseq |= QUEUE_ORDSEQ_BAR;
 
 	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
 		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-	else
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
+	else
+		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
 
 	return rq;
 }
7 changes: 5 additions & 2 deletions include/linux/blkdev.h
@@ -526,20 +526,23 @@ enum {
 	QUEUE_ORDERED_BY_DRAIN		= 0x01,
 	QUEUE_ORDERED_BY_TAG		= 0x02,
 	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
 	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
 	QUEUE_ORDERED_DO_FUA		= 0x80,
 
 	QUEUE_ORDERED_NONE		= 0x00,
 
-	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN,
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
 	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_POSTFLUSH,
 	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_FUA,
 
-	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG,
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
 	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_POSTFLUSH,
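
With QUEUE_ORDERED_DO_BAR folded into the composite modes, their numeric values work out as below. The arithmetic is computed from the flag values in the hunk above; the EXAMPLE_* names are illustrative only, not kernel identifiers.

/* Numeric expansion of the composite ordered modes after this patch;
 * values taken from the flags shown in the hunk above. */
enum {
	EXAMPLE_DRAIN       = 0x01 | 0x20,		/* BY_DRAIN | DO_BAR                  = 0x21 */
	EXAMPLE_DRAIN_FLUSH = 0x21 | 0x10 | 0x40,	/* DRAIN | DO_PREFLUSH | DO_POSTFLUSH = 0x71 */
	EXAMPLE_DRAIN_FUA   = 0x21 | 0x10 | 0x80,	/* DRAIN | DO_PREFLUSH | DO_FUA       = 0xb1 */
	EXAMPLE_TAG         = 0x02 | 0x20,		/* BY_TAG | DO_BAR                    = 0x22 */
	EXAMPLE_TAG_FLUSH   = 0x22 | 0x10 | 0x40,	/* TAG | DO_PREFLUSH | DO_POSTFLUSH   = 0x72 */
};
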
