Skip to content

Commit bf17ce3

Browse files
Milan Broz authored and Linus Torvalds committed
dm io: remove old interface
Remove old dm-io interface.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 88be163 commit bf17ce3

File tree

2 files changed

+7
-175
lines changed

2 files changed

+7
-175
lines changed

drivers/md/dm-io.c

Lines changed: 4 additions & 127 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,6 @@
1313
#include <linux/sched.h>
1414
#include <linux/slab.h>
1515

16-
static struct bio_set *_bios;
17-
1816
struct dm_io_client {
1917
mempool_t *pool;
2018
struct bio_set *bios;
@@ -35,74 +33,12 @@ struct io {
3533
* io. Since async io is likely to be the majority of io we'll
3634
* have the same number of io contexts as bios! (FIXME: must reduce this).
3735
*/
38-
static unsigned _num_ios;
39-
static mempool_t *_io_pool;
40-
41-
/*
42-
* Temporary functions to allow old and new interfaces to co-exist.
43-
*/
44-
static struct bio_set *bios(struct dm_io_client *client)
45-
{
46-
return client ? client->bios : _bios;
47-
}
48-
49-
static mempool_t *io_pool(struct dm_io_client *client)
50-
{
51-
return client ? client->pool : _io_pool;
52-
}
5336

5437
static unsigned int pages_to_ios(unsigned int pages)
5538
{
5639
return 4 * pages; /* too many ? */
5740
}
5841

59-
static int resize_pool(unsigned int new_ios)
60-
{
61-
int r = 0;
62-
63-
if (_io_pool) {
64-
if (new_ios == 0) {
65-
/* free off the pool */
66-
mempool_destroy(_io_pool);
67-
_io_pool = NULL;
68-
bioset_free(_bios);
69-
70-
} else {
71-
/* resize the pool */
72-
r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
73-
}
74-
75-
} else {
76-
/* create new pool */
77-
_io_pool = mempool_create_kmalloc_pool(new_ios,
78-
sizeof(struct io));
79-
if (!_io_pool)
80-
return -ENOMEM;
81-
82-
_bios = bioset_create(16, 16);
83-
if (!_bios) {
84-
mempool_destroy(_io_pool);
85-
_io_pool = NULL;
86-
return -ENOMEM;
87-
}
88-
}
89-
90-
if (!r)
91-
_num_ios = new_ios;
92-
93-
return r;
94-
}
95-
96-
int dm_io_get(unsigned int num_pages)
97-
{
98-
return resize_pool(_num_ios + pages_to_ios(num_pages));
99-
}
100-
101-
void dm_io_put(unsigned int num_pages)
102-
{
103-
resize_pool(_num_ios - pages_to_ios(num_pages));
104-
}
105-
10642
/*
10743
* Create a client with mempool and bioset.
10844
*/
@@ -182,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
182118
io_notify_fn fn = io->callback;
183119
void *context = io->context;
184120

185-
mempool_free(io, io_pool(io->client));
121+
mempool_free(io, io->client->pool);
186122
fn(r, context);
187123
}
188124
}
@@ -310,7 +246,7 @@ static void dm_bio_destructor(struct bio *bio)
310246
{
311247
struct io *io = bio->bi_private;
312248

313-
bio_free(bio, bios(io->client));
249+
bio_free(bio, io->client->bios);
314250
}
315251

316252
/*
@@ -358,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
358294
* to hide it from bio_add_page().
359295
*/
360296
num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
361-
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
297+
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
362298
bio->bi_sector = where->sector + (where->count - remaining);
363299
bio->bi_bdev = where->bdev;
364300
bio->bi_end_io = endio;
@@ -462,7 +398,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
462398
return -EIO;
463399
}
464400

465-
io = mempool_alloc(io_pool(client), GFP_NOIO);
401+
io = mempool_alloc(client->pool, GFP_NOIO);
466402
io->error = 0;
467403
atomic_set(&io->count, 1); /* see dispatch_io() */
468404
io->sleeper = NULL;
@@ -474,56 +410,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
474410
return 0;
475411
}
476412

477-
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
478-
struct page_list *pl, unsigned int offset,
479-
unsigned long *error_bits)
480-
{
481-
struct dpages dp;
482-
list_dp_init(&dp, pl, offset);
483-
return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
484-
}
485-
486-
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
487-
struct bio_vec *bvec, unsigned long *error_bits)
488-
{
489-
struct dpages dp;
490-
bvec_dp_init(&dp, bvec);
491-
return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
492-
}
493-
494-
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
495-
void *data, unsigned long *error_bits)
496-
{
497-
struct dpages dp;
498-
vm_dp_init(&dp, data);
499-
return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
500-
}
501-
502-
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
503-
struct page_list *pl, unsigned int offset,
504-
io_notify_fn fn, void *context)
505-
{
506-
struct dpages dp;
507-
list_dp_init(&dp, pl, offset);
508-
return async_io(NULL, num_regions, where, rw, &dp, fn, context);
509-
}
510-
511-
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
512-
struct bio_vec *bvec, io_notify_fn fn, void *context)
513-
{
514-
struct dpages dp;
515-
bvec_dp_init(&dp, bvec);
516-
return async_io(NULL, num_regions, where, rw, &dp, fn, context);
517-
}
518-
519-
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
520-
void *data, io_notify_fn fn, void *context)
521-
{
522-
struct dpages dp;
523-
vm_dp_init(&dp, data);
524-
return async_io(NULL, num_regions, where, rw, &dp, fn, context);
525-
}
526-
527413
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
528414
{
529415
/* Set up dpages based on memory type */
@@ -572,12 +458,3 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
572458
&dp, io_req->notify.fn, io_req->notify.context);
573459
}
574460
EXPORT_SYMBOL(dm_io);
575-
576-
EXPORT_SYMBOL(dm_io_get);
577-
EXPORT_SYMBOL(dm_io_put);
578-
EXPORT_SYMBOL(dm_io_sync);
579-
EXPORT_SYMBOL(dm_io_async);
580-
EXPORT_SYMBOL(dm_io_sync_bvec);
581-
EXPORT_SYMBOL(dm_io_async_bvec);
582-
EXPORT_SYMBOL(dm_io_sync_vm);
583-
EXPORT_SYMBOL(dm_io_async_vm);

drivers/md/dm-io.h

Lines changed: 3 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -12,18 +12,14 @@
1212
struct io_region {
1313
struct block_device *bdev;
1414
sector_t sector;
15-
sector_t count;
15+
sector_t count; /* If this is zero the region is ignored. */
1616
};
1717

1818
struct page_list {
1919
struct page_list *next;
2020
struct page *page;
2121
};
2222

23-
/*
24-
* 'error' is a bitset, with each bit indicating whether an error
25-
* occurred doing io to the corresponding region.
26-
*/
2723
typedef void (*io_notify_fn)(unsigned long error, void *context);
2824

2925
enum dm_io_mem_type {
@@ -62,16 +58,6 @@ struct dm_io_request {
6258
struct dm_io_client *client; /* Client memory handler */
6359
};
6460

65-
/*
66-
* Before anyone uses the IO interface they should call
67-
* dm_io_get(), specifying roughly how many pages they are
68-
* expecting to perform io on concurrently.
69-
*
70-
* This function may block.
71-
*/
72-
int dm_io_get(unsigned int num_pages);
73-
void dm_io_put(unsigned int num_pages);
74-
7561
/*
7662
* For async io calls, users can alternatively use the dm_io() function below
7763
* and dm_io_client_create() to create private mempools for the client.
@@ -82,41 +68,10 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages);
8268
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
8369
void dm_io_client_destroy(struct dm_io_client *client);
8470

85-
/*
86-
* Synchronous IO.
87-
*
88-
* Please ensure that the rw flag in the next two functions is
89-
* either READ or WRITE, ie. we don't take READA. Any
90-
* regions with a zero count field will be ignored.
91-
*/
92-
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
93-
struct page_list *pl, unsigned int offset,
94-
unsigned long *error_bits);
95-
96-
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
97-
struct bio_vec *bvec, unsigned long *error_bits);
98-
99-
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
100-
void *data, unsigned long *error_bits);
101-
102-
/*
103-
* Aynchronous IO.
104-
*
105-
* The 'where' array may be safely allocated on the stack since
106-
* the function takes a copy.
107-
*/
108-
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
109-
struct page_list *pl, unsigned int offset,
110-
io_notify_fn fn, void *context);
111-
112-
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
113-
struct bio_vec *bvec, io_notify_fn fn, void *context);
114-
115-
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
116-
void *data, io_notify_fn fn, void *context);
117-
11871
/*
11972
* IO interface using private per-client pools.
73+
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
74+
* error occurred doing io to the corresponding region.
12075
*/
12176
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
12277
struct io_region *region, unsigned long *sync_error_bits);

0 commit comments

Comments (0)