blktrace: port to tracepoints
This was a forward port of work done by Mathieu Desnoyers. I changed it to
encode the 'what' parameter in the tracepoint name, so that one can register
interest in specific events rather than in whole classes of events that then
have to be filtered on the 'what' parameter.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
acmel authored and Ingo Molnar committed Nov 26, 2008
1 parent 509dcee commit 5f3ea37
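
With this scheme each former BLK_TA_* action becomes its own tracepoint, so a consumer hooks exactly the events it cares about instead of attaching to one generic event and filtering on 'what'. Below is a minimal sketch of that pattern; it is not taken from this commit's diff, and it assumes the DECLARE_TRACE/TPPROTO/TPARGS tracepoint API of this kernel era, with an illustrative probe and init function.

/*
 * Minimal sketch, not part of this commit.  Assumes the 2.6.28-era
 * tracepoint API (DECLARE_TRACE with TPPROTO/TPARGS); the exact
 * declarations in the real <trace/block.h> may differ.
 */
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/tracepoint.h>

/* One tracepoint per former 'what' value, e.g. BLK_TA_PLUG -> block_plug. */
DECLARE_TRACE(block_plug,
        TPPROTO(struct request_queue *q),
        TPARGS(q));

/* A consumer such as blktrace attaches a probe to just this one event. */
static void probe_block_plug(struct request_queue *q)
{
        /* emit a BLK_TA_PLUG record for this queue */
}

static int __init block_plug_probe_init(void)
{
        /* register_trace_<name>() is generated by DECLARE_TRACE(). */
        return register_trace_block_plug(probe_block_plug);
}
device_initcall(block_plug_probe_init);

The calls in the diff below, such as trace_block_plug(q) in blk_plug_device(), are the per-event hooks that the tracepoint declarations generate.
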
Showing 9 changed files with 418 additions and 199 deletions.
1 change: 1 addition & 0 deletions block/Kconfig
@@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE
 	depends on SYSFS
 	select RELAY
 	select DEBUG_FS
+	select TRACEPOINTS
 	help
 	  Say Y here if you want to be able to trace the block layer actions
 	  on a given queue. Tracing allows you to see any traffic happening
33 changes: 14 additions & 19 deletions block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <trace/block.h>
 
 #include "blk.h"
 
@@ -205,7 +206,7 @@ void blk_plug_device(struct request_queue *q)
 
 	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+		trace_block_plug(q);
 	}
 }
 EXPORT_SYMBOL(blk_plug_device);
@@ -292,19 +293,15 @@ void blk_unplug_work(struct work_struct *work)
 	struct request_queue *q =
 		container_of(work, struct request_queue, unplug_work);
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_io(q);
 	q->unplug_fn(q);
 }
 
 void blk_unplug_timeout(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
 
-	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+	trace_block_unplug_timer(q);
 	kblockd_schedule_work(q, &q->unplug_work);
 }
 
@@ -314,9 +311,7 @@ void blk_unplug(struct request_queue *q)
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
 	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
+		trace_block_unplug_io(q);
 		q->unplug_fn(q);
 	}
 }
@@ -822,7 +817,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }
@@ -848,7 +843,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -928,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	trace_block_rq_requeue(q, rq);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -1167,7 +1162,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_back_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		trace_block_bio_backmerge(q, bio);
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
@@ -1186,7 +1181,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		if (!ll_front_merge_fn(q, req, bio))
 			break;
 
-		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		trace_block_bio_frontmerge(q, bio);
 
 		bio->bi_next = req->bio;
 		req->bio = bio;
@@ -1269,7 +1264,7 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
				    bdev->bd_dev, bio->bi_sector,
				    bio->bi_sector - p->start_sect);
 	}
@@ -1441,10 +1436,10 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 
 		if (old_sector != -1)
-			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+			trace_block_remap(q, bio, old_dev, bio->bi_sector,
					    old_sector);
 
-		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+		trace_block_bio_queue(q, bio);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
@@ -1656,7 +1651,7 @@ static int __end_that_request_first(struct request *req, int error,
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
-	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+	trace_block_rq_complete(req->q, req);
 
 	/*
 	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
