block: blk-merge: try to make front segments in full size
When merging a bvec into a segment, if the bvec is too big
to merge, the current policy is to move the whole bvec into
another new segment.

This patchset changes the policy to try to maximize the size
of front segments: in the situation above, the front part of
the bvec is merged into the current segment, and the
remainder is put into the next segment.

This patch prepares for multipage bvec support, where this
case can become quite common, so we should try to make front
segments full size.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Ming Lei authored and axboe committed Jan 6, 2018
1 parent 6a501bf commit a2d3796
Showing 1 changed file with 49 additions and 5 deletions: block/blk-merge.c
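Before the diff, here is a minimal, self-contained sketch of the policy described in the commit message above: when the current segment cannot take the whole bvec, fill it up to the max segment size with the front of the bvec and leave the remainder for the next segment. This is an editor's illustration, not part of the patch; front_fill, struct seg and the sizes are made-up names for the sketch.

#include <stdio.h>

/* A toy segment: only the accumulated byte count matters here. */
struct seg {
	unsigned len;
};

/*
 * Merge as much of a bv_len-sized bvec as fits into the current
 * segment and return how many bytes were merged; the caller puts
 * the remainder into a new segment.
 */
static unsigned front_fill(struct seg *cur, unsigned bv_len,
			   unsigned max_seg_size)
{
	unsigned advance;

	if (cur->len + bv_len <= max_seg_size) {
		cur->len += bv_len;		/* the whole bvec fits */
		return bv_len;
	}

	/* old policy: nothing merged, the whole bvec moved to a new segment */
	advance = max_seg_size - cur->len;
	cur->len += advance;			/* front part fills the segment */
	return advance;				/* remainder opens a new segment */
}

int main(void)
{
	struct seg cur = { .len = 60 << 10 };	/* 60KB already in the segment */
	unsigned merged = front_fill(&cur, 16 << 10, 64 << 10);

	/* expect: 4KB merged here, 12KB left for the next segment */
	printf("merged %u bytes, segment is now %u bytes\n", merged, cur.len);
	return 0;
}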
@@ -109,6 +109,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
unsigned advance = 0;

bio_for_each_segment(bv, bio, iter) {
/*
@@ -134,12 +135,32 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
}

if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
goto new_segment;
if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
/*
* One assumption is that the initial value of
* @seg_size (equal to bv.bv_len) won't be
* bigger than the max segment size, but this
* no longer holds once multipage bvecs are used.
*/
advance = queue_max_segment_size(q) - seg_size;

if (advance > 0) {
seg_size += advance;
sectors += advance >> 9;
bv.bv_len -= advance;
bv.bv_offset += advance;
}

/*
* Still need to put remainder of current
* bvec into a new segment.
*/
goto new_segment;
}

seg_size += bv.bv_len;
bvprv = bv;
@@ -161,6 +182,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;

/* restore the bvec for iterator */
if (advance) {
bv.bv_len += advance;
bv.bv_offset -= advance;
advance = 0;
}
}

do_split = false;
@@ -361,16 +388,29 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
{

int nbytes = bvec->bv_len;
unsigned advance = 0;

if (*sg && *cluster) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;

if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;

/*
* Try our best to merge part of the bvec into the
* previous segment, following the same policy as
* blk_bio_segment_split().
*/
if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
advance = queue_max_segment_size(q) - (*sg)->length;
if (advance) {
(*sg)->length += advance;
bvec->bv_offset += advance;
bvec->bv_len -= advance;
}
goto new_segment;
}

(*sg)->length += nbytes;
} else {
new_segment:
@@ -393,6 +433,10 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,

sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
(*nsegs)++;

/* restore the bvec so the caller's iterator advances correctly */
bvec->bv_offset -= advance;
bvec->bv_len += advance;
}
*bvprv = *bvec;
}
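A note on the "restore the bvec" hunks above: as the in-diff comments say, both loops walk the bio with a loop-local bvec copy, and bio_for_each_segment() advances its iterator by that copy's bv_len, so the trimmed length has to be restored once the whole bvec has been accounted for. The following standalone model (toy names and sizes, editor's sketch, not kernel code) shows the accounting coming out right only because of the restore.

#include <stdio.h>

#define MAX_SEG 8192u	/* stand-in for queue_max_segment_size(q) */

int main(void)
{
	/* A toy "bio": three bvec lengths, the middle one multipage-sized. */
	unsigned bvecs[] = { 4096, 12288, 4096 };
	unsigned idx = 0, off = 0;		/* iterator: bvec index + byte offset */
	unsigned seg_size = 0, nsegs = 1, accounted = 0;

	while (idx < 3) {
		unsigned bv_len = bvecs[idx] - off;	/* loop-local copy, like bv */
		unsigned advance = 0;

		if (seg_size + bv_len > MAX_SEG) {
			/* fill the front segment with the head of the bvec */
			advance = MAX_SEG - seg_size;
			bv_len -= advance;
			accounted += advance;
			/* the remainder opens a new segment */
			nsegs++;
			seg_size = 0;
		}
		seg_size += bv_len;
		accounted += bv_len;

		/* restore: this whole bvec has been accounted for above */
		bv_len += advance;

		/*
		 * Iterator step, advancing by bv_len just as
		 * bio_for_each_segment() does.  Without the restore, off
		 * would stop mid-bvec and the tail would be counted twice.
		 */
		off += bv_len;
		if (off >= bvecs[idx]) {
			idx++;
			off = 0;
		}
	}

	/* expect 3 segments and 20480 bytes, the size of the toy bio */
	printf("segments: %u, bytes accounted: %u\n", nsegs, accounted);
	return 0;
}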
