struct dm_io_client *client;
io_notify_fn callback;
void *context;
- void *vma_invalidate_address;
- unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
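For context: the DM_IO_MAX_REGIONS alignment on struct io exists so that the low bits of an io pointer are guaranteed zero and can carry a region number inside bio->bi_private, which is what store_io_and_region_in_bio() later in this patch relies on. A minimal sketch of that packing, assuming DM_IO_MAX_REGIONS is a power of two; the retrieve-side helper name is an assumption, as it is not visible in this excerpt:

static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	/* The aligned attribute guarantees these low bits are free. */
	BUG_ON(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS));

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	/* Mask off the low bits to recover the pointer; keep them as region. */
	*io = (struct io *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}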
set_bit(region, &io->error_bits);
if (atomic_dec_and_test(&io->count)) {
- if (io->vma_invalidate_size)
- invalidate_kernel_vmap_range(io->vma_invalidate_address,
- io->vma_invalidate_size);
-
if (io->sleeper)
wake_up_process(io->sleeper);
unsigned context_u;
void *context_ptr;
-
- void *vma_invalidate_address;
- unsigned long vma_invalidate_size;
};
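With the vma_invalidate fields gone, struct dpages reduces to its page-walking hooks plus the context words kept above. A sketch of the resulting layout, inferred from the dp->get_page() call in do_region() below; the next_page hook does not appear in this excerpt and is an assumption:

struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len,
			 unsigned *offset);
	void (*next_page)(struct dpages *dp);	/* assumed counterpart hook */

	unsigned context_u;
	void *context_ptr;
};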
/*
 * Functions for getting the pages from a list.
 */
unsigned offset;
unsigned num_bvecs;
sector_t remaining = where->count;
- struct request_queue *q = bdev_get_queue(where->bdev);
- sector_t discard_sectors;
/*
 * where->count may be zero if rw holds a flush and we need to
 * send a zero-sized flush.
 */

do {
/*
 * Allocate a suitably sized bio.
*/
- if (rw & REQ_DISCARD)
- num_bvecs = 1;
- else
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
- dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
-
+ num_bvecs = dm_sector_div_up(remaining,
+ (PAGE_SIZE >> SECTOR_SHIFT));
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
- if (rw & REQ_DISCARD) {
- discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = discard_sectors << SECTOR_SHIFT;
- remaining -= discard_sectors;
- } else while (remaining) {
- /*
- * Try and add as many pages as possible.
- */
+ /*
+ * Try and add as many pages as possible.
+ */
+ while (remaining) {
dp->get_page(dp, &page, &len, &offset);
len = min(len, to_bytes(remaining));
if (!bio_add_page(bio, page, len, offset))
io->sleeper = current;
io->client = client;
- io->vma_invalidate_address = dp->vma_invalidate_address;
- io->vma_invalidate_size = dp->vma_invalidate_size;
-
dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
io->callback = fn;
io->context = context;
- io->vma_invalidate_address = dp->vma_invalidate_address;
- io->vma_invalidate_size = dp->vma_invalidate_size;
-
dispatch_io(rw, num_regions, where, dp, io, 0);
return 0;
}
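The callback/context pair stored above is the asynchronous completion path: when notify.fn is non-NULL, dm_io() dispatches and returns immediately, and the function is later invoked with the accumulated error bits. A hedged caller-side sketch; my_done(), my_write() and the use of a completion are illustrative assumptions, not code from this patch:

#include <linux/completion.h>
#include <linux/dm-io.h>

/* Hypothetical completion handler matching io_notify_fn. */
static void my_done(unsigned long error_bits, void *context)
{
	struct completion *done = context;

	/* error_bits has one bit set per failed region. */
	complete(done);
}

/* Hypothetical asynchronous write of a page list to one region. */
static int my_write(struct dm_io_client *client,
		    struct dm_io_region *where, struct page_list *pl,
		    struct completion *done)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.offset = 0,
		.mem.ptr.pl = pl,
		.notify.fn = my_done,		/* non-NULL => async_io() path */
		.notify.context = done,
		.client = client,
	};

	return dm_io(&io_req, 1, where, NULL);
}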
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
- unsigned long size)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
/* Set up dpages based on memory type */
-
- dp->vma_invalidate_address = NULL;
- dp->vma_invalidate_size = 0;
-
switch (io_req->mem.type) {
case DM_IO_PAGE_LIST:
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
break;
case DM_IO_VMA:
- flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
- if ((io_req->bi_rw & RW_MASK) == READ) {
- dp->vma_invalidate_address = io_req->mem.ptr.vma;
- dp->vma_invalidate_size = size;
- }
vm_dp_init(dp, io_req->mem.ptr.vma);
break;
int r;
struct dpages dp;
- r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
+ r = dp_init(io_req, &dp);
if (r)
return r;
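One behavioural consequence worth noting: the deleted DM_IO_VMA branch used to flush_kernel_vmap_range() before the I/O and invalidate_kernel_vmap_range() after a read, so after this change a caller passing a vmalloc/vmap buffer on a machine with aliasing caches owns that maintenance itself. A minimal sketch of what such a caller might look like; the function name and error handling are assumptions:

#include <linux/dm-io.h>
#include <linux/highmem.h>	/* flush/invalidate_kernel_vmap_range() */

/* Hypothetical synchronous read into a vmap'd buffer. */
static int my_vma_read(struct dm_io_client *client,
		       struct dm_io_region *where, void *vma_buf)
{
	unsigned long error_bits = 0;
	unsigned long len = where->count << SECTOR_SHIFT;
	int r;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = vma_buf,
		.notify.fn = NULL,	/* NULL => synchronous sync_io() path */
		.client = client,
	};

	/* Previously done inside dp_init(): write back dirty cache lines. */
	flush_kernel_vmap_range(vma_buf, len);

	r = dm_io(&io_req, 1, where, &error_bits);

	/* Previously done in dec_count(): discard stale lines after a read. */
	invalidate_kernel_vmap_range(vma_buf, len);

	if (r)
		return r;

	return error_bits ? -EIO : 0;
}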