/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct completion *wait;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
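/*
 * A concrete instance of the encoding above: on a 64-bit build
 * DM_IO_MAX_REGIONS is 64, so 'struct io' is 64-byte aligned and the low
 * six bits of its address are always zero.  Those six bits carry a region
 * number in the range 0..63, and retrieve_io_and_region_from_bio() simply
 * masks the two values apart again.
 */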
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->wait)
			complete(io->wait);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
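/*
 * How the count is used: the submitter initialises io->count to 1 before
 * calling dispatch_io(), do_region() takes one extra reference per bio it
 * submits, and dispatch_io() drops the initial reference once submission is
 * finished.  The completion (sync) or callback (async) therefore fires
 * exactly once, after the last bio has ended.
 */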
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
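/*
 * A dpages behaves like a simple iterator: do_region() calls get_page() to
 * learn the current page, the bytes left in it and the starting offset,
 * then next_page() to advance.  The four implementations below cover page
 * lists, bio_vec arrays, vmalloc'd buffers and ordinary kernel memory.
 */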
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
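/*
 * Worked example (assuming 4096-byte pages): a kernel buffer that starts
 * 512 bytes into a page gives context_u == 512, so the first km_get_page()
 * returns that page with offset 512 and len 3584; km_next_page() then
 * advances context_ptr to the next page boundary and clears context_u, so
 * every following page is offered in full.  vm_get_page()/vm_next_page()
 * behave the same way for vmalloc'd buffers.
 */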
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized-bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
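/*
 * Each pass of the do/while loop above emits one bio, so a region that is
 * larger than one bio can carry (or that exhausts the bvec slots in
 * bio_add_page()) is split across several bios, each of which takes its own
 * reference on io->count before being submitted.
 */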
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
	DECLARE_COMPLETION_ONSTACK(wait);

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = &wait;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&wait);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
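/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * synchronous read of one 4 KiB region into an ordinary kernel buffer.
 * 'bdev', 'buf' and 'client' are assumed to be supplied by the caller;
 * 'client' would come from dm_io_client_create().
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,			// 8 sectors == 4 KiB
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn    = NULL,		// NULL selects the sync_io() path
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 */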
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}