/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *      Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *      dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS            8

#define DM_BUFIO_MEMORY_PERCENT         2
#define DM_BUFIO_VMALLOC_PERCENT        25
#define DM_BUFIO_WRITEBACK_PERCENT      75
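
/*
 * Editorial note with illustrative numbers (not upstream text): on a
 * machine with 1 GiB of usable memory, DM_BUFIO_MEMORY_PERCENT caps the
 * default cache at roughly 20 MiB; with 4 KiB blocks that is about 5000
 * buffers shared across all clients, and a client starts background
 * writeback once roughly 75% of its share of buffers is dirty.
 */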

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS        30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS       300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS            16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT  (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT   (PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN      0
#define LIST_DIRTY      1
#define LIST_SIZE       2

/*
 * Linking of buffers:
 *      All buffers are linked to buffer_tree with their node field.
 *
 *      Clean buffers that are not being written (B_WRITING not set)
 *      are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *      Dirty and clean buffers that are being written are linked to
 *      lru[LIST_DIRTY] with their lru_list field. When the write
 *      finishes, the buffer cannot be relinked immediately (because we
 *      are in an interrupt context and relinking requires process
 *      context), so some clean-not-writing buffers can be held on the
 *      dirty list too.  They are later moved to the clean list in the
 *      process context.
 */
struct dm_bufio_client {
        struct mutex lock;

        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];

        struct block_device *bdev;
        unsigned block_size;
        unsigned char sectors_per_block_bits;
        unsigned char pages_per_block_bits;
        unsigned char blocks_per_page_bits;
        unsigned aux_size;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);

        struct dm_io_client *dm_io;

        struct list_head reserved_buffers;
        unsigned need_reserved_buffers;

        unsigned minimum_buffers;

        struct rb_root buffer_tree;
        wait_queue_head_t free_buffer_wait;

        int async_write_error;

        struct list_head client_list;
        struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING       0
#define B_WRITING       1
#define B_DIRTY         2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
        DATA_MODE_SLAB = 0,
        DATA_MODE_GET_FREE_PAGES = 1,
        DATA_MODE_VMALLOC = 2,
        DATA_MODE_LIMIT = 3
};

struct dm_buffer {
        struct rb_node node;
        struct list_head lru_list;
        sector_t block;
        void *data;
        enum data_mode data_mode;
        unsigned char list_mode;                /* LIST_* */
        unsigned hold_count;
        int read_error;
        int write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
        struct list_head write_list;
        struct bio bio;
        struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
        unsigned ret = c->blocks_per_page_bits - 1;

        BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

        return ret;
}

#define DM_BUFIO_CACHE(c)       (dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)  (dm_bufio_cache_names[dm_bufio_cache_index(c)])
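
/*
 * Worked example (editorial sketch): with 4 KiB pages (PAGE_SHIFT 12)
 * and a client using 512-byte blocks, __ffs(block_size) is 9, so
 * blocks_per_page_bits is 12 - 9 = 3 and dm_bufio_cache_index() returns
 * 2; all 512-byte clients then share the kmem cache created under the
 * name "dm_bufio_cache-512".
 */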

#define dm_bufio_in_request()   (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
        mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
        return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
        mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()               \
do {                                            \
        if (unlikely(need_resched()))           \
                _cond_resched();                \
} while (0)
#else
#  define dm_bufio_cond_resched()                do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);
/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
        struct rb_node *n = c->buffer_tree.rb_node;
        struct dm_buffer *b;

        while (n) {
                b = container_of(n, struct dm_buffer, node);

                if (b->block == block)
                        return b;

                n = (b->block < block) ? n->rb_right : n->rb_left;
        }

        return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
        struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
        struct dm_buffer *found;

        while (*new) {
                found = container_of(*new, struct dm_buffer, node);

                if (found->block == b->block) {
                        BUG_ON(found != b);
                        return;
                }

                parent = *new;
                new = (found->block < b->block) ?
                        &((*new)->rb_right) : &((*new)->rb_left);
        }

        rb_link_node(&b->node, parent, new);
        rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
        rb_erase(&b->node, &c->buffer_tree);
}
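
/*
 * Editorial sketch of the caller pattern (hypothetical, mirrors
 * __bufio_new below): both helpers assume c->lock is held, so a
 * lookup-or-insert looks like
 *
 *      dm_bufio_lock(c);
 *      b = __find(c, block);
 *      if (!b) {
 *              b = some_new_buffer;    // obtained while holding the lock
 *              __insert(c, b);
 *      }
 *      dm_bufio_unlock(c);
 */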

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
                &dm_bufio_allocated_kmem_cache,
                &dm_bufio_allocated_get_free_pages,
                &dm_bufio_allocated_vmalloc,
        };

        spin_lock(&param_spinlock);

        *class_ptr[data_mode] += diff;

        dm_bufio_current_allocated += diff;

        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
                dm_bufio_peak_allocated = dm_bufio_current_allocated;

        spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);

        dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

        /*
         * Use default if set to 0 and report the actual cache size used.
         */
        if (!dm_bufio_cache_size_latch) {
                (void)cmpxchg(&dm_bufio_cache_size, 0,
                              dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }

        dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
                                         (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
{
        unsigned noio_flag;
        void *ptr;

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
        }

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
            gfp_mask & __GFP_NORETRY) {
                *data_mode = DATA_MODE_GET_FREE_PAGES;
                return (void *)__get_free_pages(gfp_mask,
                                                c->pages_per_block_bits);
        }

        *data_mode = DATA_MODE_VMALLOC;

        /*
         * __vmalloc allocates the data pages and auxiliary structures with
         * gfp_flags that were specified, but pagetables are always allocated
         * with GFP_KERNEL, no matter what was specified as gfp_mask.
         *
         * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
         * all allocations done by this process (including pagetables) are done
         * as if GFP_NOIO was specified.
         */

        if (gfp_mask & __GFP_NORETRY)
                noio_flag = memalloc_noio_save();

        ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

        if (gfp_mask & __GFP_NORETRY)
                memalloc_noio_restore(noio_flag);

        return ptr;
}
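
/*
 * Summary of the decision above (editorial note): with 4 KiB pages and
 * MAX_ORDER 11, blocks up to 2 KiB come from the per-size kmem cache,
 * blocks up to 4 MiB may use __get_free_pages() when the caller passed
 * __GFP_NORETRY (i.e. the allocation is allowed to fail), and everything
 * else, including the must-not-fail reserve allocations, falls back to
 * __vmalloc().
 */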

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
                             void *data, enum data_mode data_mode)
{
        switch (data_mode) {
        case DATA_MODE_SLAB:
                kmem_cache_free(DM_BUFIO_CACHE(c), data);
                break;

        case DATA_MODE_GET_FREE_PAGES:
                free_pages((unsigned long)data, c->pages_per_block_bits);
                break;

        case DATA_MODE_VMALLOC:
                vfree(data);
                break;

        default:
                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
                       data_mode);
                BUG();
        }
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
        struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
                                      gfp_mask);

        if (!b)
                return NULL;

        b->c = c;

        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
        if (!b->data) {
                kfree(b);
                return NULL;
        }

        adjust_total_allocated(b->data_mode, (long)c->block_size);

        return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        adjust_total_allocated(b->data_mode, -(long)c->block_size);

        free_buffer_data(c, b->data, b->data_mode);
        kfree(b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
        struct dm_bufio_client *c = b->c;

        c->n_buffers[dirty]++;
        b->block = block;
        b->list_mode = dirty;
        list_add(&b->lru_list, &c->lru[dirty]);
        __insert(b->c, b);
        b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        __remove(b->c, b);
        list_del(&b->lru_list);
}

/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_move(&b->lru_list, &c->lru[dirty]);
        b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *      the vector list is limited (increasing this limit increases
 *      memory-consumption per buffer, so it is not viable);
 *
 *      the memory must be direct-mapped, not vmalloced;
 *
 *      the I/O driver can reject requests spuriously if it thinks that
 *      the requests are too big for the device or if they cross a
 *      controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
        struct dm_buffer *b = context;

        b->bio.bi_error = error ? -EIO : 0;
        b->bio.bi_end_io(&b->bio);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
                     bio_end_io_t *end_io)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
        };
        struct dm_io_region region = {
                .bdev = b->c->bdev,
                .sector = block << b->c->sectors_per_block_bits,
                .count = b->c->block_size >> SECTOR_SHIFT,
        };

        if (b->data_mode != DATA_MODE_VMALLOC) {
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = b->data;
        } else {
                io_req.mem.type = DM_IO_VMA;
                io_req.mem.ptr.vma = b->data;
        }

        b->bio.bi_end_io = end_io;

        r = dm_io(&io_req, 1, &region, NULL);
        if (r) {
                b->bio.bi_error = r;
                end_io(&b->bio);
        }
}

static void inline_endio(struct bio *bio)
{
        bio_end_io_t *end_fn = bio->bi_private;
        int error = bio->bi_error;

        /*
         * Reset the bio to free any attached resources
         * (e.g. bio integrity profiles).
         */
        bio_reset(bio);

        bio->bi_error = error;
        end_fn(bio);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                           bio_end_io_t *end_io)
{
        char *ptr;
        int len;

        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
        b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = inline_endio;
        /*
         * Use of .bi_private isn't a problem here because
         * the dm_buffer's inline bio is local to bufio.
         */
        b->bio.bi_private = end_io;

        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
         * If len < PAGE_SIZE the buffer doesn't cross page boundary.
         */
        ptr = b->data;
        len = b->c->block_size;

        if (len >= PAGE_SIZE)
                BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
        else
                BUG_ON((unsigned long)ptr & (len - 1));

        do {
                if (!bio_add_page(&b->bio, virt_to_page(ptr),
                                  len < PAGE_SIZE ? len : PAGE_SIZE,
                                  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
                        BUG_ON(b->c->block_size <= PAGE_SIZE);
                        use_dmio(b, rw, block, end_io);
                        return;
                }

                len -= PAGE_SIZE;
                ptr += PAGE_SIZE;
        } while (len > 0);

        submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
                      bio_end_io_t *end_io)
{
        if (rw == WRITE && b->c->write_callback)
                b->c->write_callback(b);

        if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
            b->data_mode != DATA_MODE_VMALLOC)
                use_inline_bio(b, rw, block, end_io);
        else
                use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->write_error = bio->bi_error;
        if (unlikely(bio->bi_error)) {
                struct dm_bufio_client *c = b->c;
                int error = bio->bi_error;
                (void)cmpxchg(&c->async_write_error, 0, error);
        }

        BUG_ON(!test_bit(B_WRITING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_WRITING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
                                 struct list_head *write_list)
{
        if (!test_bit(B_DIRTY, &b->state))
                return;

        clear_bit(B_DIRTY, &b->state);
        wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

        if (!write_list)
                submit_io(b, WRITE, b->block, write_endio);
        else
                list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
        struct blk_plug plug;
        blk_start_plug(&plug);
        while (!list_empty(write_list)) {
                struct dm_buffer *b =
                        list_entry(write_list->next, struct dm_buffer, write_list);
                list_del(&b->write_list);
                submit_io(b, WRITE, b->block, write_endio);
                dm_bufio_cond_resched();
        }
        blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
        BUG_ON(b->hold_count);

        if (!b->state)  /* fast case */
                return;

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b, NULL);
        wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases its hold
 * count on one.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&c->free_buffer_wait, &wait);
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);

        io_schedule();

        remove_wait_queue(&c->free_buffer_wait, &wait);

        dm_bufio_lock(c);
}

enum new_flag {
        NF_FRESH = 0,
        NF_READ = 1,
        NF_GET = 2,
        NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b;

        /*
         * dm-bufio is resistant to allocation failures (it just keeps
         * one buffer reserved in case all the allocations fail).
         * So set flags to not try too hard:
         *      GFP_NOIO: don't recurse into the I/O layer
         *      __GFP_NORETRY: don't retry and rather return failure
         *      __GFP_NOMEMALLOC: don't use emergency reserves
         *      __GFP_NOWARN: don't print a warning in case of failure
         *
         * For debugging, if we set the cache size to 1, no new buffers will
         * be allocated.
         */
        while (1) {
                if (dm_bufio_cache_size_latch != 1) {
                        b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (b)
                                return b;
                }

                if (nf == NF_PREFETCH)
                        return NULL;

                if (!list_empty(&c->reserved_buffers)) {
                        b = list_entry(c->reserved_buffers.next,
                                       struct dm_buffer, lru_list);
                        list_del(&b->lru_list);
                        c->need_reserved_buffers++;

                        return b;
                }

                b = __get_unclaimed_buffer(c);
                if (b)
                        return b;

                __wait_for_free_buffer(c);
        }
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

        if (!b)
                return NULL;

        if (c->alloc_callback)
                c->alloc_callback(b);

        return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        if (!c->need_reserved_buffers)
                free_buffer(b);
        else {
                list_add(&b->lru_list, &c->reserved_buffers);
                c->need_reserved_buffers--;
        }

        wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
                                        struct list_head *write_list)
{
        struct dm_buffer *b, *tmp;

        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state)) {
                        __relink_lru(b, LIST_CLEAN);
                        continue;
                }

                if (no_wait && test_bit(B_WRITING, &b->state))
                        return;

                __write_dirty_buffer(b, write_list);
                dm_bufio_cond_resched();
        }
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
                               unsigned long *threshold_buffers,
                               unsigned long *limit_buffers)
{
        unsigned long buffers;

        if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
                mutex_lock(&dm_bufio_clients_lock);
                __cache_size_refresh();
                mutex_unlock(&dm_bufio_clients_lock);
        }

        buffers = dm_bufio_cache_size_per_client >>
                  (c->sectors_per_block_bits + SECTOR_SHIFT);

        if (buffers < c->minimum_buffers)
                buffers = c->minimum_buffers;

        *limit_buffers = buffers;
        *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}

/*
 * Check if we're over the watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over limit_buffers, block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
                              struct list_head *write_list)
{
        unsigned long threshold_buffers, limit_buffers;

        __get_memory_limit(c, &threshold_buffers, &limit_buffers);

        while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
               limit_buffers) {

                struct dm_buffer *b = __get_unclaimed_buffer(c);

                if (!b)
                        return;

                __free_buffer_wake(b);
                dm_bufio_cond_resched();
        }

        if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
                __write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
                                     enum new_flag nf, int *need_submit,
                                     struct list_head *write_list)
{
        struct dm_buffer *b, *new_b = NULL;

        *need_submit = 0;

        b = __find(c, block);
        if (b)
                goto found_buffer;

        if (nf == NF_GET)
                return NULL;

        new_b = __alloc_buffer_wait(c, nf);
        if (!new_b)
                return NULL;

        /*
         * We've had a period where the mutex was unlocked, so need to
         * recheck the buffer tree.
         */
        b = __find(c, block);
        if (b) {
                __free_buffer_wake(new_b);
                goto found_buffer;
        }

        __check_watermark(c, write_list);

        b = new_b;
        b->hold_count = 1;
        b->read_error = 0;
        b->write_error = 0;
        __link_buffer(b, block, LIST_CLEAN);

        if (nf == NF_FRESH) {
                b->state = 0;
                return b;
        }

        b->state = 1 << B_READING;
        *need_submit = 1;

        return b;

found_buffer:
        if (nf == NF_PREFETCH)
                return NULL;
        /*
         * Note: it is essential that we don't wait for the buffer to be
         * read if dm_bufio_get function is used. Both dm_bufio_get and
         * dm_bufio_prefetch can be used in the driver request routine.
         * If the user called both dm_bufio_prefetch and dm_bufio_get on
         * the same buffer, it would deadlock if we waited.
         */
        if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
                return NULL;

        b->hold_count++;
        __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                     test_bit(B_WRITING, &b->state));
        return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->read_error = bio->bi_error;

        BUG_ON(!test_bit(B_READING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_READING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
                      enum new_flag nf, struct dm_buffer **bp)
{
        int need_submit;
        struct dm_buffer *b;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        b = __bufio_new(c, block, nf, &need_submit, &write_list);
        dm_bufio_unlock(c);

        __flush_write_list(&write_list);

        if (!b)
                return b;

        if (need_submit)
                submit_io(b, READ, b->block, read_endio);

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

        if (b->read_error) {
                int error = b->read_error;

                dm_bufio_release(b);

                return ERR_PTR(error);
        }

        *bp = b;

        return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
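
/*
 * Usage sketch (editorial, hypothetical caller): read one block and
 * release it when done; "process()" stands in for real work.
 *
 *      struct dm_buffer *bp;
 *      void *data = dm_bufio_read(c, block, &bp);
 *
 *      if (!IS_ERR(data)) {
 *              process(data);
 *              dm_bufio_release(bp);
 *      }
 */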

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
                       sector_t block, unsigned n_blocks)
{
        struct blk_plug plug;

        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        blk_start_plug(&plug);
        dm_bufio_lock(c);

        for (; n_blocks--; block++) {
                int need_submit;
                struct dm_buffer *b;
                b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
                                &write_list);
                if (unlikely(!list_empty(&write_list))) {
                        dm_bufio_unlock(c);
                        blk_finish_plug(&plug);
                        __flush_write_list(&write_list);
                        blk_start_plug(&plug);
                        dm_bufio_lock(c);
                }
                if (unlikely(b != NULL)) {
                        dm_bufio_unlock(c);

                        if (need_submit)
                                submit_io(b, READ, b->block, read_endio);
                        dm_bufio_release(b);

                        dm_bufio_cond_resched();

                        if (!n_blocks)
                                goto flush_plug;
                        dm_bufio_lock(c);
                }
        }

        dm_bufio_unlock(c);

flush_plug:
        blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
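
/*
 * Usage sketch (editorial, hypothetical caller): prefetch doesn't block
 * on the reads, so a request routine can start I/O early and only wait
 * when the data is actually needed.
 *
 *      dm_bufio_prefetch(c, block, 8);         // kick off reads
 *      ...
 *      data = dm_bufio_read(c, block, &bp);    // likely already cached
 */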

void dm_bufio_release(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(!b->hold_count);

        b->hold_count--;
        if (!b->hold_count) {
                wake_up(&c->free_buffer_wait);

                /*
                 * If there were errors on the buffer, and the buffer is not
                 * to be written, free the buffer. There is no point in caching
                 * an invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
                    !test_bit(B_READING, &b->state) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
                        __free_buffer_wake(b);
                }
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(test_bit(B_READING, &b->state));

        if (!test_and_set_bit(B_DIRTY, &b->state))
                __relink_lru(b, LIST_DIRTY);

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
        int a, f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
        dm_bufio_lock(c);

again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                int dropped_lock = 0;

                if (buffers_processed < c->n_buffers[LIST_DIRTY])
                        buffers_processed++;

                BUG_ON(test_bit(B_READING, &b->state));

                if (test_bit(B_WRITING, &b->state)) {
                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                }

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state))
                        __relink_lru(b, LIST_CLEAN);

                dm_bufio_cond_resched();

                /*
                 * If we dropped the lock, the list is no longer consistent,
                 * so we must restart the search.
                 *
                 * In the most common case, the buffer just processed is
                 * relinked to the clean list, so we won't loop scanning the
                 * same buffer again and again.
                 *
                 * This may livelock if there is another thread simultaneously
                 * dirtying buffers, so we count the number of buffers walked
                 * and if it exceeds the total number of buffers, it means that
                 * someone is doing some writes simultaneously with us.  In
                 * this case, stop, dropping the lock.
                 */
                if (dropped_lock)
                        goto again;
        }
        wake_up(&c->free_buffer_wait);
        dm_bufio_unlock(c);

        a = xchg(&c->async_write_error, 0);
        f = dm_bufio_issue_flush(c);
        if (a)
                return a;

        return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
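
/*
 * Usage sketch (editorial, hypothetical caller): the typical write path
 * is new (or read), modify, mark dirty, then write everything back once.
 *
 *      void *data = dm_bufio_new(c, block, &bp);
 *
 *      if (!IS_ERR(data)) {
 *              memset(data, 0, dm_bufio_get_block_size(c));
 *              dm_bufio_mark_buffer_dirty(bp);
 *              dm_bufio_release(bp);
 *              err = dm_bufio_write_dirty_buffers(c);  // also flushes the disk cache
 *      }
 */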

/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
        struct dm_io_request io_req = {
                .bi_rw = WRITE_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = 0,
                .count = 0,
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
        struct dm_bufio_client *c = b->c;
        struct dm_buffer *new;

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);

retry:
        new = __find(c, new_block);
        if (new) {
                if (new->hold_count) {
                        __wait_for_free_buffer(c);
                        goto retry;
                }

                /*
                 * FIXME: Is there any point waiting for a write that's going
                 * to be overwritten in a bit?
                 */
                __make_buffer_clean(new);
                __unlink_buffer(new);
                __free_buffer_wake(new);
        }

        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));

        __write_dirty_buffer(b, NULL);
        if (b->hold_count == 1) {
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
                wait_on_bit_lock_io(&b->state, B_WRITING,
                                    TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as a block number.
                 * After the write, link the buffer back to old_block.
                 * All this must be done in bufio lock, so that block number
                 * change isn't visible to other threads.
                 */
                old_block = b->block;
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, WRITE, new_block, write_endio);
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }

        dm_bufio_unlock(c);
        dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
        struct dm_buffer *b;

        dm_bufio_lock(c);

        b = __find(c, block);
        if (b && likely(!b->hold_count) && likely(!b->state)) {
                __unlink_buffer(b);
                __free_buffer_wake(b);
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
        c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
        return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
        return i_size_read(c->bdev->bd_inode) >>
                           (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
        return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
        return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
        return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
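
/*
 * Usage sketch (editorial, hypothetical client): a client that passed a
 * non-zero aux_size to dm_bufio_client_create() gets that much private
 * space after each struct dm_buffer:
 *
 *      struct my_aux { unsigned validated; }; // hypothetical per-buffer data
 *
 *      c = dm_bufio_client_create(bdev, 4096, 1, sizeof(struct my_aux),
 *                                 NULL, NULL);
 *      ...
 *      struct my_aux *aux = dm_bufio_get_aux_data(bp);
 */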

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
        return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
        struct dm_buffer *b;
        int i;

        BUG_ON(dm_bufio_in_request());

        /*
         * An optimization so that the buffers are not written one-by-one.
         */
        dm_bufio_write_dirty_buffers_async(c);

        dm_bufio_lock(c);

        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);

        for (i = 0; i < LIST_SIZE; i++)
                list_for_each_entry(b, &c->lru[i], lru_list)
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(!list_empty(&c->lru[i]));

        dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if I/O is pending or the client
 * is still using it.  Caller is expected to know the buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
        if (!(gfp & __GFP_FS)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
                        return false;
        }

        if (b->hold_count)
                return false;

        __make_buffer_clean(b);
        __unlink_buffer(b);
        __free_buffer_wake(b);

        return true;
}

static unsigned get_retain_buffers(struct dm_bufio_client *c)
{
        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
        return retain_bytes / c->block_size;
}

static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
                            gfp_t gfp_mask)
{
        int l;
        struct dm_buffer *b, *tmp;
        unsigned long freed = 0;
        unsigned long count = nr_to_scan;
        unsigned retain_target = get_retain_buffers(c);

        for (l = 0; l < LIST_SIZE; l++) {
                list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
                        if (__try_evict_buffer(b, gfp_mask))
                                freed++;
                        if (!--nr_to_scan || ((count - freed) <= retain_target))
                                return freed;
                        dm_bufio_cond_resched();
                }
        }
        return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct dm_bufio_client *c;
        unsigned long freed;

        c = container_of(shrink, struct dm_bufio_client, shrinker);
        if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return SHRINK_STOP;

        freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
        dm_bufio_unlock(c);
        return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct dm_bufio_client *c;
        unsigned long count;

        c = container_of(shrink, struct dm_bufio_client, shrinker);
        if (sc->gfp_mask & __GFP_FS)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return 0;

        count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        dm_bufio_unlock(c);
        return count;
}
1577 /*
1578  * Create the buffering interface
1579  */
1580 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1581                                                unsigned reserved_buffers, unsigned aux_size,
1582                                                void (*alloc_callback)(struct dm_buffer *),
1583                                                void (*write_callback)(struct dm_buffer *))
1584 {
1585         int r;
1586         struct dm_bufio_client *c;
1587         unsigned i;
1588
1589         BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1590                (block_size & (block_size - 1)));
1591
1592         c = kzalloc(sizeof(*c), GFP_KERNEL);
1593         if (!c) {
1594                 r = -ENOMEM;
1595                 goto bad_client;
1596         }
1597         c->buffer_tree = RB_ROOT;
1598
1599         c->bdev = bdev;
1600         c->block_size = block_size;
1601         c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1602         c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1603                                   __ffs(block_size) - PAGE_SHIFT : 0;
1604         c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1605                                   PAGE_SHIFT - __ffs(block_size) : 0);
1606
1607         c->aux_size = aux_size;
1608         c->alloc_callback = alloc_callback;
1609         c->write_callback = write_callback;
1610
1611         for (i = 0; i < LIST_SIZE; i++) {
1612                 INIT_LIST_HEAD(&c->lru[i]);
1613                 c->n_buffers[i] = 0;
1614         }
1615
1616         mutex_init(&c->lock);
1617         INIT_LIST_HEAD(&c->reserved_buffers);
1618         c->need_reserved_buffers = reserved_buffers;
1619
1620         c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1621
1622         init_waitqueue_head(&c->free_buffer_wait);
1623         c->async_write_error = 0;
1624
1625         c->dm_io = dm_io_client_create();
1626         if (IS_ERR(c->dm_io)) {
1627                 r = PTR_ERR(c->dm_io);
1628                 goto bad_dm_io;
1629         }
1630
1631         mutex_lock(&dm_bufio_clients_lock);
1632         if (c->blocks_per_page_bits) {
1633                 if (!DM_BUFIO_CACHE_NAME(c)) {
1634                         DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1635                         if (!DM_BUFIO_CACHE_NAME(c)) {
1636                                 r = -ENOMEM;
1637                                 mutex_unlock(&dm_bufio_clients_lock);
1638                                 goto bad_cache;
1639                         }
1640                 }
1641
1642                 if (!DM_BUFIO_CACHE(c)) {
1643                         DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1644                                                               c->block_size,
1645                                                               c->block_size, 0, NULL);
1646                         if (!DM_BUFIO_CACHE(c)) {
1647                                 r = -ENOMEM;
1648                                 mutex_unlock(&dm_bufio_clients_lock);
1649                                 goto bad_cache;
1650                         }
1651                 }
1652         }
1653         mutex_unlock(&dm_bufio_clients_lock);
1654
        while (c->need_reserved_buffers) {
                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

                if (!b) {
                        r = -ENOMEM;
                        goto bad_buffer;
                }
                __free_buffer_wake(b);
        }

        mutex_lock(&dm_bufio_clients_lock);
        dm_bufio_client_count++;
        list_add(&c->client_list, &dm_bufio_all_clients);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

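        /*
         * Register a per-client shrinker so the VM can ask dm-bufio to
         * drop clean buffers when the system runs low on memory.
         */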
        c->shrinker.count_objects = dm_bufio_shrink_count;
        c->shrinker.scan_objects = dm_bufio_shrink_scan;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        register_shrinker(&c->shrinker);

        return c;

bad_buffer:
bad_cache:
        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }
        dm_io_client_destroy(c->dm_io);
bad_dm_io:
        kfree(c);
bad_client:
        return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
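
/*
 * Illustrative sketch only (not used by this file): a hypothetical
 * caller creating a client for power-of-two 4KiB blocks, with one
 * reserved buffer and no per-buffer aux data or callbacks.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */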

/*
 * Free the buffering interface.
 * The caller must ensure that no buffers are still referenced.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
        unsigned i;

        drop_buffers(c);

        unregister_shrinker(&c->shrinker);

        mutex_lock(&dm_bufio_clients_lock);

        list_del(&c->client_list);
        dm_bufio_client_count--;
        __cache_size_refresh();

        mutex_unlock(&dm_bufio_clients_lock);

        BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
        BUG_ON(c->need_reserved_buffers);

        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }

        for (i = 0; i < LIST_SIZE; i++)
                if (c->n_buffers[i])
                        DMERR("leaked buffer count %d: %lu", i, c->n_buffers[i]);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(c->n_buffers[i]);

        dm_io_client_destroy(c->dm_io);
        kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

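/*
 * Convert the max_age_seconds module parameter to jiffies, clamping it
 * so that the multiplication by HZ cannot overflow an unsigned int.
 */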
static unsigned get_max_age_hz(void)
{
        unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

        if (max_age > UINT_MAX / HZ)
                max_age = UINT_MAX / HZ;

        return max_age * HZ;
}

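/*
 * Jiffies wrap around, so use time_after_eq() rather than a plain
 * comparison; it performs the subtraction in jiffies arithmetic and is
 * therefore safe across the wrap.
 */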
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
        return time_after_eq(jiffies, b->last_accessed + age_hz);
}

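/*
 * Walk the clean LRU from its cold end and evict buffers that have not
 * been accessed for age_hz jiffies, but never shrink the cache below
 * the retain_bytes target.
 */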
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
        struct dm_buffer *b, *tmp;
        unsigned retain_target = get_retain_buffers(c);
        unsigned count;

        dm_bufio_lock(c);

        count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
                if (count <= retain_target)
                        break;

                if (!older_than(b, age_hz))
                        break;

                if (__try_evict_buffer(b, 0))
                        count--;

                dm_bufio_cond_resched();
        }

        dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
        unsigned long max_age_hz = get_max_age_hz();
        struct dm_bufio_client *c;

        mutex_lock(&dm_bufio_clients_lock);

        list_for_each_entry(c, &dm_bufio_all_clients, client_list)
                __evict_old_buffers(c, max_age_hz);

        mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

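/*
 * Periodic cleanup: the delayed work re-queues itself, so buffer aging
 * runs every DM_BUFIO_WORK_TIMER_SECS seconds for as long as the
 * module is loaded.
 */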
static void work_fn(struct work_struct *w)
{
        cleanup_old_buffers();

        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the global memory limits.
 */
static int __init dm_bufio_init(void)
{
        __u64 mem;

        dm_bufio_allocated_kmem_cache = 0;
        dm_bufio_allocated_get_free_pages = 0;
        dm_bufio_allocated_vmalloc = 0;
        dm_bufio_current_allocated = 0;

        memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
        memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

        mem = (__u64)((totalram_pages - totalhigh_pages) *
                      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

        if (mem > ULONG_MAX)
                mem = ULONG_MAX;

#ifdef CONFIG_MMU
        /*
         * Get the size of vmalloc space the same way as VMALLOC_TOTAL
         * in fs/proc/internal.h
         */
        if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
                mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif
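
        /*
         * Worked example (illustrative numbers only): with 8GiB of low
         * memory, DM_BUFIO_MEMORY_PERCENT = 2 yields a ~164MiB default
         * cache. A 64-bit machine with a huge vmalloc arena keeps that
         * value; a 32-bit machine with a 512MiB vmalloc space would
         * clamp it to 25% of that, i.e. 128MiB.
         */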

        dm_bufio_default_cache_size = mem;

        mutex_lock(&dm_bufio_clients_lock);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
        if (!dm_bufio_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);

        return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
        int bug = 0;
        int i;

        cancel_delayed_work_sync(&dm_bufio_work);
        destroy_workqueue(dm_bufio_wq);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
                kmem_cache_destroy(dm_bufio_caches[i]);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
                kfree(dm_bufio_cache_names[i]);

        if (dm_bufio_client_count) {
                DMCRIT("%s: dm_bufio_client_count leaked: %d",
                        __func__, dm_bufio_client_count);
                bug = 1;
        }

        if (dm_bufio_current_allocated) {
                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
                        __func__, dm_bufio_current_allocated);
                bug = 1;
        }

        if (dm_bufio_allocated_get_free_pages) {
                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
                       __func__, dm_bufio_allocated_get_free_pages);
                bug = 1;
        }

        if (dm_bufio_allocated_vmalloc) {
                DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
                       __func__, dm_bufio_allocated_vmalloc);
                bug = 1;
        }

        if (bug)
                BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");