Merge branch 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block: (38 commits)
  block: don't access jiffies when initialising io_context
  cfq: remove 8 bytes of padding from cfq_rb_root on 64 bit builds
  block: fix for "Consolidate phys_segment and hw_segment limits"
  cfq-iosched: quantum check tweak
  blktrace: perform cleanup after setup error
  blkdev: fix merge_bvec_fn return value checks
  cfq-iosched: requests "in flight" vs "in driver" clarification
  cciss: Fix problem with scatter gather elements in the scsi half of the driver
  cciss: eliminate unnecessary pointer use in cciss scsi code
  cciss: do not use void pointer for scsi hba data
  cciss: factor out scatter gather chain block mapping code
  cciss: fix scatter gather chain block dma direction kludge
  cciss: simplify scatter gather code
  cciss: factor out scatter gather chain block allocation and freeing
  cciss: detect bad alignment of scsi commands at build time
  cciss: clarify command list padding calculation
  cfq-iosched: rethink seeky detection for SSDs
  cfq-iosched: rework seeky detection
  block: remove padding from io_context on 64bit builds
  block: Consolidate phys_segment and hw_segment limits
  ...
torvalds committed Mar 1, 2010
2 parents 524df55 + 4671a13 commit b1bf936
Showing 77 changed files with 704 additions and 903 deletions.
14 changes: 14 additions & 0 deletions Documentation/ABI/testing/sysfs-block
@@ -128,3 +128,17 @@ Description:
                 preferred request size for workloads where sustained
                 throughput is desired. If no optimal I/O size is
                 reported this file contains 0.
+
+What:           /sys/block/<disk>/queue/nomerges
+Date:           January 2010
+Contact:
+Description:
+                Standard I/O elevator operations include attempts to
+                merge contiguous I/Os. For known random I/O loads these
+                attempts will always fail and result in extra cycles
+                being spent in the kernel. This allows one to turn off
+                this behavior in one of two ways: When set to 1, complex
+                merge checks are disabled, but the simple one-shot merges
+                with the previous I/O request are enabled. When set to 2,
+                all merge tries are disabled. The default value is 0,
+                which enables all types of merge tries.
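
To try the new attribute, here is a minimal userspace sketch that selects the strictest setting. The disk name (sda) and the chosen value are assumptions for the example, not part of the commit.

/*
 * Minimal sketch: write "2" (disable all merge tries) to the new
 * nomerges attribute. Assumes the disk is sda and that we run as
 * root; adjust the path for the device actually being tuned.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* 0 = all merges, 1 = one-shot merges only, 2 = no merges */
        const char *path = "/sys/block/sda/queue/nomerges";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "2", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}

Writing the value from a root shell (echo 2 into the same file) is equivalent; the attribute simply takes 0, 1, or 2.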
10 changes: 5 additions & 5 deletions Documentation/block/queue-sysfs.txt
@@ -25,11 +25,11 @@ size allowed by the hardware.
 
 nomerges (RW)
 -------------
-This enables the user to disable the lookup logic involved with IO merging
-requests in the block layer. Merging may still occur through a direct
-1-hit cache, since that comes for (almost) free. The IO scheduler will not
-waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
-to 0, enabling all merges.
+This enables the user to disable the lookup logic involved with IO
+merging requests in the block layer. By default (0) all merges are
+enabled. When set to 1 only simple one-hit merges will be tried. When
+set to 2 no merge algorithms will be tried (including one-hit or more
+complex tree/hash lookups).
 
 nr_requests (RW)
 ----------------
4 changes: 2 additions & 2 deletions arch/um/drivers/ubd_kern.c
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
         ubd_dev->fd = fd;
 
         if(ubd_dev->cow.file != NULL){
-                blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
+                blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
 
                 err = -ENOMEM;
                 ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
@@ -849,7 +849,7 @@ static int ubd_add(int n, char **error_out)
         }
         ubd_dev->queue->queuedata = ubd_dev;
 
-        blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+        blk_queue_max_segments(ubd_dev->queue, MAX_SG);
         err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
         if(err){
                 *error_out = "Failed to register device";
14 changes: 0 additions & 14 deletions block/blk-cgroup.c
@@ -23,20 +23,6 @@ static LIST_HEAD(blkio_list);
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
-bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
-{
-        if (!css_tryget(&blkcg->css))
-                return false;
-        return true;
-}
-EXPORT_SYMBOL_GPL(blkiocg_css_tryget);
-
-void blkiocg_css_put(struct blkio_cgroup *blkcg)
-{
-        css_put(&blkcg->css);
-}
-EXPORT_SYMBOL_GPL(blkiocg_css_put);
-
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
         return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
3 changes: 0 additions & 3 deletions block/blk-cgroup.h
@@ -43,9 +43,6 @@ struct blkio_group {
         unsigned long sectors;
 };
 
-extern bool blkiocg_css_tryget(struct blkio_cgroup *blkcg);
-extern void blkiocg_css_put(struct blkio_cgroup *blkcg);
-
 typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
 typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
                 unsigned int weight);
34 changes: 15 additions & 19 deletions block/blk-core.c
@@ -1490,52 +1490,49 @@ static inline void __generic_make_request(struct bio *bio)
 /*
  * We only want one ->make_request_fn to be active at a time,
  * else stack usage with stacked devices could be a problem.
- * So use current->bio_{list,tail} to keep a list of requests
+ * So use current->bio_list to keep a list of requests
  * submited by a make_request_fn function.
- * current->bio_tail is also used as a flag to say if
+ * current->bio_list is also used as a flag to say if
  * generic_make_request is currently active in this task or not.
  * If it is NULL, then no make_request is active. If it is non-NULL,
  * then a make_request is active, and new requests should be added
  * at the tail
  */
 void generic_make_request(struct bio *bio)
 {
-        if (current->bio_tail) {
+        struct bio_list bio_list_on_stack;
+
+        if (current->bio_list) {
                 /* make_request is active */
-                *(current->bio_tail) = bio;
-                bio->bi_next = NULL;
-                current->bio_tail = &bio->bi_next;
+                bio_list_add(current->bio_list, bio);
                 return;
         }
         /* following loop may be a bit non-obvious, and so deserves some
          * explanation.
          * Before entering the loop, bio->bi_next is NULL (as all callers
          * ensure that) so we have a list with a single bio.
          * We pretend that we have just taken it off a longer list, so
-         * we assign bio_list to the next (which is NULL) and bio_tail
-         * to &bio_list, thus initialising the bio_list of new bios to be
+         * we assign bio_list to a pointer to the bio_list_on_stack,
+         * thus initialising the bio_list of new bios to be
          * added. __generic_make_request may indeed add some more bios
          * through a recursive call to generic_make_request. If it
          * did, we find a non-NULL value in bio_list and re-enter the loop
          * from the top. In this case we really did just take the bio
-         * of the top of the list (no pretending) and so fixup bio_list and
-         * bio_tail or bi_next, and call into __generic_make_request again.
+         * of the top of the list (no pretending) and so remove it from
+         * bio_list, and call into __generic_make_request again.
         *
          * The loop was structured like this to make only one call to
          * __generic_make_request (which is important as it is large and
          * inlined) and to keep the structure simple.
          */
         BUG_ON(bio->bi_next);
+        bio_list_init(&bio_list_on_stack);
+        current->bio_list = &bio_list_on_stack;
         do {
-                current->bio_list = bio->bi_next;
-                if (bio->bi_next == NULL)
-                        current->bio_tail = &current->bio_list;
-                else
-                        bio->bi_next = NULL;
                 __generic_make_request(bio);
-                bio = current->bio_list;
+                bio = bio_list_pop(current->bio_list);
         } while (bio);
-        current->bio_tail = NULL; /* deactivate */
+        current->bio_list = NULL; /* deactivate */
 }
 EXPORT_SYMBOL(generic_make_request);
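
The hunk above swaps the open-coded current->bio_{list,tail} pair for an on-stack bio list while keeping the recursion-flattening trick intact: a re-entrant call only queues its bio and returns, and the top-level call drains the queue iteratively. The standalone userspace sketch below illustrates the same pattern; the struct and helper names deliberately mirror the kernel's bio_list API, but everything here is an illustrative assumption, not the kernel implementation.

#include <stdio.h>

struct bio {
        int id;
        struct bio *bi_next;
};

struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;
        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;
        bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bio->bi_next;
                if (!bl->head)
                        bl->tail = NULL;
                bio->bi_next = NULL;
        }
        return bio;
}

/* Stand-in for current->bio_list: non-NULL only while the top-level
 * caller is draining its on-stack list. */
static struct bio_list *active_list;

static void handle_one(struct bio *bio);

static void make_request(struct bio *bio)
{
        struct bio_list on_stack;

        if (active_list) {
                /* Re-entered: queue the bio instead of recursing. */
                bio_list_add(active_list, bio);
                return;
        }
        bio_list_init(&on_stack);
        active_list = &on_stack;
        do {
                handle_one(bio);
                bio = bio_list_pop(active_list);
        } while (bio);
        active_list = NULL;     /* deactivate */
}

/* Pretend each bio with id below 3 causes a stacked device to
 * resubmit another bio, as a stacked ->make_request_fn would. */
static void handle_one(struct bio *bio)
{
        static struct bio next[3];

        printf("handling bio %d\n", bio->id);
        if (bio->id < 3) {
                next[bio->id].id = bio->id + 1;
                make_request(&next[bio->id]);
        }
}

int main(void)
{
        struct bio first = { .id = 0 };

        make_request(&first);
        return 0;
}

However deeply resubmissions nest, the call stack stays one frame deep: each nested call costs only a list append, which is the whole point of the on-stack list.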

@@ -1617,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
          * limitation.
          */
         blk_recalc_rq_segments(rq);
-        if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-            rq->nr_phys_segments > queue_max_hw_segments(q)) {
+        if (rq->nr_phys_segments > queue_max_segments(q)) {
                 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                 return -EIO;
         }
2 changes: 1 addition & 1 deletion block/blk-ioc.c
@@ -91,7 +91,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
         spin_lock_init(&ret->lock);
         ret->ioprio_changed = 0;
         ret->ioprio = 0;
-        ret->last_waited = jiffies; /* doesn't matter... */
+        ret->last_waited = 0; /* doesn't matter... */
         ret->nr_batch_requests = 0; /* because this is 0 */
         INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
         INIT_HLIST_HEAD(&ret->cic_list);
8 changes: 2 additions & 6 deletions block/blk-merge.c
@@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
         int nr_phys_segs = bio_phys_segments(q, bio);
 
-        if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-            req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
@@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                 total_phys_segments--;
         }
 
-        if (total_phys_segments > queue_max_phys_segments(q))
-                return 0;
-
-        if (total_phys_segments > queue_max_hw_segments(q))
+        if (total_phys_segments > queue_max_segments(q))
                 return 0;
 
         /* Merge is OK... */
…
