[firefly-linux-kernel-4.4.55.git] drivers/md/dm-snap-persistent.c
index 3ac415675b6c778b5dd22aaaf4ee6c2dc4ca48eb..4d3909393f2cce5488ced8843ccab1375d40d2d9 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -7,16 +7,20 @@
 
 #include "dm-exception-store.h"
 
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/dm-io.h>
+#include "dm-bufio.h"
 
 #define DM_MSG_PREFIX "persistent snapshot"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32       /* 16KB */
 
+#define DM_PREFETCH_CHUNKS             12
+
 /*-----------------------------------------------------------------
  * Persistent snapshots, by persistent we mean that the snapshot
  * will survive a reboot.
@@ -197,16 +201,11 @@ err_area:
 
 static void free_area(struct pstore *ps)
 {
-       if (ps->area)
-               vfree(ps->area);
+       vfree(ps->area);
        ps->area = NULL;
-
-       if (ps->zero_area)
-               vfree(ps->zero_area);
+       vfree(ps->zero_area);
        ps->zero_area = NULL;
-
-       if (ps->header_area)
-               vfree(ps->header_area);
+       vfree(ps->header_area);
        ps->header_area = NULL;
 }
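
Note on the free_area() cleanup above (the same simplification is applied to persistent_dtr() further down): vfree(), like kfree(), is defined to be a no-op when passed a NULL pointer, so the "if (ptr)" guards added nothing. A minimal sketch (kernel context, <linux/vmalloc.h>):

        /* Freeing an optional vmalloc()ed buffer needs no NULL check. */
        static void free_optional(void **p)
        {
                vfree(*p);      /* safe even when *p is NULL */
                *p = NULL;      /* still clear the pointer to avoid reuse */
        }
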
 
@@ -256,7 +255,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
-       flush_work(&req.work);
+       flush_workqueue(ps->metadata_wq);
+       destroy_work_on_stack(&req.work);
 
        return req.result;
 }
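
The chunk_io() change above still hands the metadata I/O to ps->metadata_wq synchronously, but it now waits with flush_workqueue() and then calls destroy_work_on_stack(), which pairs with INIT_WORK_ONSTACK() so the debug-objects annotation for the on-stack work item is torn down before the stack frame disappears (relevant with CONFIG_DEBUG_OBJECTS_WORK). A minimal sketch of that pattern, with made-up names:

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        struct sync_req {
                struct work_struct work;
                int result;
        };

        static void sync_req_fn(struct work_struct *work)
        {
                struct sync_req *req = container_of(work, struct sync_req, work);

                req->result = 0;                /* the real work would go here */
        }

        static int run_sync(struct workqueue_struct *wq)
        {
                struct sync_req req = { .result = -EIO };

                INIT_WORK_ONSTACK(&req.work, sync_req_fn);
                queue_work(wq, &req.work);
                flush_workqueue(wq);                    /* wait for the item to run */
                destroy_work_on_stack(&req.work);       /* pairs with INIT_WORK_ONSTACK() */

                return req.result;
        }
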
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
+static void skip_metadata(struct pstore *ps)
+{
+       uint32_t stride = ps->exceptions_per_area + 1;
+       chunk_t next_free = ps->next_free;
+       if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+               ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
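
The new skip_metadata() helper encodes the on-disk layout used by area_location() above: after the NUM_SNAPSHOT_HDR_CHUNKS header chunk(s) (1 in this driver), every area occupies exceptions_per_area data chunks preceded by one metadata chunk, so metadata chunks recur with a stride of exceptions_per_area + 1. sector_div() divides its argument in place and returns the remainder; here the quotient lands in a local copy and is discarded, only the remainder is checked. Whenever ps->next_free would point at a metadata chunk it is bumped past it. A small worked layout, with exceptions_per_area == 3 chosen purely for illustration:

        /*
         * stride == 4, NUM_SNAPSHOT_HDR_CHUNKS == 1:
         *
         *   chunk 0       header
         *   chunk 1       metadata, area 0      (1 % 4 == 1 -> skip)
         *   chunks 2..4   exception data, area 0
         *   chunk 5       metadata, area 1      (5 % 4 == 1 -> skip)
         *   chunks 6..8   exception data, area 1
         *   ...
         *
         * skip_metadata() bumps next_free whenever next_free % stride
         * equals NUM_SNAPSHOT_HDR_CHUNKS, i.e. whenever it lands on one
         * of the metadata chunks.
         */
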
@@ -314,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
-               ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
+               ps->store->chunk_shift = __ffs(ps->store->chunk_size);
                chunk_size_supplied = 0;
        }
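
On the chunk_shift change above: ffs() returns the 1-based index of the least significant set bit (and 0 for an all-zero argument), while __ffs() returns the 0-based index and expects a non-zero argument. chunk_size is a power of two here, so both expressions compute log2(chunk_size); __ffs() simply drops the "- 1" adjustment. With the 32-sector default chunk size, for example:

        /* chunk_size == 32 (DM_CHUNK_SIZE_DEFAULT_SECTORS):
         *   ffs(32) - 1 == 6 - 1 == 5
         *   __ffs(32)   ==          5
         * so chunk_shift == 5 either way, and 1 << 5 == 32.
         */
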
 
@@ -393,17 +401,18 @@ static int write_header(struct pstore *ps)
 /*
  * Access functions for the disk exceptions, these do the endian conversions.
  */
-static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
+static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
+                                           uint32_t index)
 {
        BUG_ON(index >= ps->exceptions_per_area);
 
-       return ((struct disk_exception *) ps->area) + index;
+       return ((struct disk_exception *) ps_area) + index;
 }
 
-static void read_exception(struct pstore *ps,
+static void read_exception(struct pstore *ps, void *ps_area,
                           uint32_t index, struct core_exception *result)
 {
-       struct disk_exception *de = get_exception(ps, index);
+       struct disk_exception *de = get_exception(ps, ps_area, index);
 
        /* copy it */
        result->old_chunk = le64_to_cpu(de->old_chunk);
@@ -413,7 +422,7 @@ static void read_exception(struct pstore *ps,
 static void write_exception(struct pstore *ps,
                            uint32_t index, struct core_exception *e)
 {
-       struct disk_exception *de = get_exception(ps, index);
+       struct disk_exception *de = get_exception(ps, ps->area, index);
 
        /* copy it */
        de->old_chunk = cpu_to_le64(e->old_chunk);
@@ -422,7 +431,7 @@ static void write_exception(struct pstore *ps,
 
 static void clear_exception(struct pstore *ps, uint32_t index)
 {
-       struct disk_exception *de = get_exception(ps, index);
+       struct disk_exception *de = get_exception(ps, ps->area, index);
 
        /* clear it */
        de->old_chunk = 0;
@@ -434,7 +443,7 @@ static void clear_exception(struct pstore *ps, uint32_t index)
  * 'full' is filled in to indicate if the area has been
  * filled.
  */
-static int insert_exceptions(struct pstore *ps,
+static int insert_exceptions(struct pstore *ps, void *ps_area,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
@@ -448,7 +457,7 @@ static int insert_exceptions(struct pstore *ps,
        *full = 1;
 
        for (i = 0; i < ps->exceptions_per_area; i++) {
-               read_exception(ps, i, &e);
+               read_exception(ps, ps_area, i, &e);
 
                /*
                 * If the new_chunk is pointing at the start of
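
The get_exception()/read_exception() changes above (and the matching call-site updates in insert_exceptions() and persistent_prepare_merge()) thread an explicit area pointer through the read path, so exceptions can be parsed straight out of a dm-bufio buffer instead of first being copied into ps->area; the write path keeps operating on ps->area, which write_exception() and clear_exception() now pass explicitly. A hypothetical caller sketch (assumes a dm_bufio_client *client and the metadata chunk number are already at hand):

        struct dm_buffer *bp;
        struct core_exception e;
        void *area;

        area = dm_bufio_read(client, chunk, &bp);       /* read one metadata chunk */
        if (IS_ERR(area))
                return PTR_ERR(area);

        read_exception(ps, area, 0, &e);        /* parse in place, no copy to ps->area */
        dm_bufio_release(bp);
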
@@ -485,24 +494,75 @@ static int read_exceptions(struct pstore *ps,
                           void *callback_context)
 {
        int r, full = 1;
+       struct dm_bufio_client *client;
+       chunk_t prefetch_area = 0;
+
+       client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
+                                       ps->store->chunk_size << SECTOR_SHIFT,
+                                       1, 0, NULL, NULL);
+
+       if (IS_ERR(client))
+               return PTR_ERR(client);
+
+       /*
+        * Setup for one current buffer + desired readahead buffers.
+        */
+       dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
 
        /*
         * Keeping reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
-               r = area_io(ps, READ);
-               if (r)
-                       return r;
+               struct dm_buffer *bp;
+               void *area;
+               chunk_t chunk;
+
+               if (unlikely(prefetch_area < ps->current_area))
+                       prefetch_area = ps->current_area;
+
+               if (DM_PREFETCH_CHUNKS) do {
+                       chunk_t pf_chunk = area_location(ps, prefetch_area);
+                       if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+                               break;
+                       dm_bufio_prefetch(client, pf_chunk, 1);
+                       prefetch_area++;
+                       if (unlikely(!prefetch_area))
+                               break;
+               } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+
+               chunk = area_location(ps, ps->current_area);
+
+               area = dm_bufio_read(client, chunk, &bp);
+               if (IS_ERR(area)) {
+                       r = PTR_ERR(area);
+                       goto ret_destroy_bufio;
+               }
 
-               r = insert_exceptions(ps, callback, callback_context, &full);
-               if (r)
-                       return r;
+               r = insert_exceptions(ps, area, callback, callback_context,
+                                     &full);
+
+               if (!full)
+                       memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
+               dm_bufio_release(bp);
+
+               dm_bufio_forget(client, chunk);
+
+               if (unlikely(r))
+                       goto ret_destroy_bufio;
        }
 
        ps->current_area--;
 
-       return 0;
+       skip_metadata(ps);
+
+       r = 0;
+
+ret_destroy_bufio:
+       dm_bufio_client_destroy(client);
+
+       return r;
 }
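
The rewritten read_exceptions() above creates a short-lived dm-bufio client whose block size is one chunk (ps->store->chunk_size << SECTOR_SHIFT bytes), asks it to keep at least 1 + DM_PREFETCH_CHUNKS buffers so the read-ahead is not evicted immediately, and stays up to DM_PREFETCH_CHUNKS metadata chunks ahead of the area currently being parsed. Each metadata chunk is parsed in place, released, and then dropped with dm_bufio_forget() because it will not be read again during this scan; only the last, partially full area is copied back into ps->area, since later commits keep appending to that area through ps->area. A condensed sketch of just the read-ahead window (same bounds checks as above, not the driver's exact code):

        static void prefetch_window(struct dm_bufio_client *client, struct pstore *ps,
                                    chunk_t *prefetch_area)
        {
                /* stay at most DM_PREFETCH_CHUNKS metadata chunks ahead */
                while (*prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS) {
                        chunk_t pf_chunk = area_location(ps, *prefetch_area);

                        if (pf_chunk >= dm_bufio_get_device_size(client))
                                break;          /* past the end of the COW device */
                        dm_bufio_prefetch(client, pf_chunk, 1); /* async read-ahead, one chunk */
                        if (!++(*prefetch_area))
                                break;          /* chunk_t wrapped around; stop */
                }
        }
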
 
 static struct pstore *get_info(struct dm_exception_store *store)
@@ -541,8 +601,7 @@ static void persistent_dtr(struct dm_exception_store *store)
        free_area(ps);
 
        /* Allocated in persistent_read_metadata */
-       if (ps->callbacks)
-               vfree(ps->callbacks);
+       vfree(ps->callbacks);
 
        kfree(ps);
 }
@@ -616,8 +675,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
 {
        struct pstore *ps = get_info(store);
-       uint32_t stride;
-       chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
        /* Is there enough room ? */
@@ -630,17 +687,15 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
-       stride = (ps->exceptions_per_area + 1);
-       next_free = ++ps->next_free;
-       if (sector_div(next_free, stride) == 1)
-               ps->next_free++;
+       ps->next_free++;
+       skip_metadata(ps);
 
        atomic_inc(&ps->pending_count);
        return 0;
 }
 
 static void persistent_commit_exception(struct dm_exception_store *store,
-                                       struct dm_exception *e,
+                                       struct dm_exception *e, int valid,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
 {
@@ -649,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        struct core_exception ce;
        struct commit_callback *cb;
 
+       if (!valid)
+               ps->valid = 0;
+
        ce.old_chunk = e->old_chunk;
        ce.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &ce);
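
persistent_commit_exception() now takes a validity flag from its caller: committing with valid == 0 clears ps->valid, and write_header() later stores that flag in the on-disk header, so the failure is recorded persistently. A hypothetical caller sketch (made-up field names loosely modelled on dm-snap.c, not the actual code):

        /* A pending exception whose copy failed is committed as invalid,
         * which clears ps->valid; write_header() records that on disk. */
        store->type->commit_exception(store, &pe->e, !pe->copy_error,
                                      commit_callback, pe);
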
@@ -727,7 +785,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
                ps->current_committed = ps->exceptions_per_area;
        }
 
-       read_exception(ps, ps->current_committed - 1, &ce);
+       read_exception(ps, ps->area, ps->current_committed - 1, &ce);
        *last_old_chunk = ce.old_chunk;
        *last_new_chunk = ce.new_chunk;
 
@@ -737,8 +795,8 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
         */
        for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
             nr_consecutive++) {
-               read_exception(ps, ps->current_committed - 1 - nr_consecutive,
-                              &ce);
+               read_exception(ps, ps->area,
+                              ps->current_committed - 1 - nr_consecutive, &ce);
                if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
                    ce.new_chunk != *last_new_chunk - nr_consecutive)
                        break;
@@ -789,10 +847,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
                DMWARN("write header failed");
 }
 
-static int persistent_ctr(struct dm_exception_store *store,
-                         unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
 {
        struct pstore *ps;
+       int r;
 
        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -814,14 +872,32 @@ static int persistent_ctr(struct dm_exception_store *store,
 
        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
-               kfree(ps);
                DMERR("couldn't start header metadata update thread");
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto err_workqueue;
+       }
+
+       if (options) {
+               char overflow = toupper(options[0]);
+               if (overflow == 'O')
+                       store->userspace_supports_overflow = true;
+               else {
+                       DMERR("Unsupported persistent store option: %s", options);
+                       r = -EINVAL;
+                       goto err_options;
+               }
        }
 
        store->context = ps;
 
        return 0;
+
+err_options:
+       destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+       kfree(ps);
+
+       return r;
 }
 
 static unsigned persistent_status(struct dm_exception_store *store,
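
persistent_ctr() above now receives its options as a single string instead of argc/argv. The only recognized option is 'o'/'O', which records that userspace can handle the snapshot reporting an "Overflow" state (store->userspace_supports_overflow); anything else fails with -EINVAL, and the new goto labels unwind the workqueue and the pstore allocation on every error path. With the snapshot target this is selected by passing "PO" instead of "P" as the persistence argument; a hypothetical table line (device names and sizes are made up):

        # 1 GiB snapshot, persistent store with overflow support ("PO"),
        # 32-sector (16 KiB) chunks -- illustrative devices only
        echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/cow PO 32" | dmsetup create snap
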
@@ -834,7 +910,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
-               DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+               DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+                      (unsigned long long)store->chunk_size);
        }
 
        return sz;
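
The matching status change: for STATUSTYPE_TABLE the store now emits "PO" instead of "P" when userspace_supports_overflow is set, so the table reported back to userspace round-trips the overflow capability rather than silently degrading to plain "P". Continuing the made-up example above, "dmsetup table snap" would then report something like:

        0 2097152 snapshot <origin-dev> <cow-dev> PO 32
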