/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)

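/*
 * Each device carries a small bioset plus a mempool of bio_split_hooks so
 * that bios which need splitting can always make forward progress, even
 * under memory pressure. The pools are created with 4 entries below; that
 * is a reserve for the mempool guarantee, not a concurrency limit.
 */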
static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */

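/*
 * Read the superblock from SB_SECTOR, convert it from its little endian
 * on-disk format and sanity check it according to the claimed version
 * (backing device vs. cache device). Returns NULL on success or a human
 * readable error string; on success the page holding the raw superblock
 * is pinned and handed back via @res for later rewrites.
 */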
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->block_size	= le16_to_cpu(s->block_size);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

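/*
 * Superblock writes: __write_super() serializes the in-memory struct
 * cache_sb back into little endian form in the bio's page and submits it.
 * Callers serialize on a per-object sb_write_mutex semaphore and use a
 * closure to track the IO, so a second write can't start while one is
 * still in flight.
 */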
static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector	= SB_SECTOR;
	bio->bi_rw		= REQ_SYNC|REQ_META;
	bio->bi_iter.bi_size	= SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio->bi_bdev	= dc->bdev;
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev	= ca->bdev;
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

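/*
 * Read or write the uuid array at the location given by bkey @k. On a
 * write, one copy is submitted per pointer in the key (i.e. to every
 * cache device holding a replica); on a read we stop after the first
 * pointer. The caller's closure is used to wait for all the IO.
 */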
static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}

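/*
 * Allocate a fresh bucket, write the current uuid array into it and
 * remember its location in c->uuid_bucket. bch_uuid_write() additionally
 * forces a journal entry so the new location is persisted.
 */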
static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * an LRU (and in the future other) cache replacement policies; for most
 * purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

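/*
 * Roughly, the on-disk format this code reads and writes looks like the
 * sketch below (field names match their uses in bch_prio_write() and
 * prio_read(); see bcache.h for the authoritative definitions):
 *
 *	struct bucket_disk {		// one entry per bucket, packed
 *		__le16	prio;
 *		__u8	gen;
 *	};
 *
 *	struct prio_set {		// one per prio bucket
 *		__u64	csum;		// of everything from magic onwards
 *		__u64	magic;		// pset_magic(&ca->sb)
 *		__u64	seq;
 *		...
 *		__u64	next_bucket;	// link to the next prio bucket
 *		struct bucket_disk data[];
 *	};
 */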
static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio->bi_bdev		= ca->bdev;
	bio->bi_rw		= REQ_SYNC|REQ_META|rw;
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

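/*
 * Write out prios/gens for every bucket on @ca. The prio buckets are
 * filled and written from last to first, so each one can record the
 * bucket number of its successor in next_bucket, forming the on-disk
 * list described above; the journal entry written at the end records
 * the head of the list. The old prio buckets are only freed once the
 * new ones (and the journal entry pointing at them) are on disk.
 */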
void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

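/*
 * Read the prios/gens back in at cache set startup: walk the chain of
 * prio buckets starting from the bucket number recorded in the journal,
 * verifying each bucket's checksum and magic, and restore prio and gen
 * for every bucket in order.
 */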
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_minor, d->disk->first_minor);
		put_disk(d->disk);
	}

	bio_split_pool_free(&d->bio_split_hook);
	if (d->bio_split)
		bioset_free(d->bio_split);
	if (is_vmalloc_addr(d->full_dirty_stripes))
		vfree(d->full_dirty_stripes);
	else
		kfree(d->full_dirty_stripes);
	if (is_vmalloc_addr(d->stripe_sectors_dirty))
		vfree(d->stripe_sectors_dirty);
	else
		kfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

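/*
 * Common initialization for both cached devices and flash only volumes:
 * allocate the per-stripe dirty counters and bitmap, grab a minor number,
 * set up bio splitting, and create the gendisk and its request queue with
 * limits suitable for a virtual (stacking) block device.
 */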
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int minor;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large");
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
	if (minor < 0)
		return minor;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    bio_split_pool_init(&d->bio_split_hook) ||
	    !(d->disk = alloc_disk(1))) {
		ida_simple_remove(&bcache_minor, minor);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= minor;
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	q->limits.max_discard_sectors	= UINT_MAX;
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);

	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

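/*
 * Bring a registered backing device online: mark it running, write the
 * superblock out as BDEV_STATE_STALE if it's starting without its cache
 * set (the cached data can no longer be trusted to be current), add the
 * gendisk and announce it to udev with the cached device's UUID and
 * label in the uevent environment.
 */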
void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * These won't show up in the uevent file, use udevadm monitor -e
	 * instead; only class / kset properties are persistent
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

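/*
 * Attach a backing device to cache set @c: check that the device belongs
 * (or can belong) to this set, find or allocate its slot in the uuid
 * array, write both the uuid array and the backing device superblock,
 * then wire the device into the set and start it. If the backing device
 * was dirty, its dirty ranges are counted and writeback is kicked off.
 */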
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(dc);
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	kthread_stop(dc->writeback_thread);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		if (dc->bdev->bd_disk)
			blk_sync_queue(bdev_get_queue(dc->bdev));

		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

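/*
 * One-time initialization of a struct cached_dev: set up its closures,
 * kobject and work items, the recent-IO tracking used for sequential
 * detection (with a default 4MB sequential cutoff), derive the stripe
 * size from the backing queue's io_opt, and initialize the common
 * bcache_device state, the request path and writeback.
 */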
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info.ra_pages =
		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
		    q->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

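/*
 * Called from the register path once a backing device superblock has been
 * read: take ownership of the block device, initialize the cached_dev and
 * try to attach it to every currently registered cache set. If it doesn't
 * attach anywhere and isn't dirty, it's started right away in passthrough
 * (uncached) mode.
 */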
static void register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio);
	dc->sb_bio.bi_max_vecs	= 1;
	dc->sb_bio.bi_io_vec	= dc->sb_bio.bi_inline_vecs;
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	bcache_device_unlink(d);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

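/*
 * Create and start the block device for one flash only volume, i.e. a
 * bcache device backed entirely by the cache set itself rather than by a
 * backing device. The volume's size and identity come from its slot in
 * the uuid array.
 */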
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list) {
		mutex_lock(&b->write_lock);
		if (btree_node_dirty(b))
			__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	cancel_delayed_work_sync(&c->journal.work);
	/* flush last journal entry if needed */
	c->journal.work.work.func(&c->journal.work.work);

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i]) {
			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
				bch_cached_dev_detach(dc);
			} else {
				bcache_device_stop(c->devices[i]);
			}
		}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

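/*
 * Allocate and initialize a cache_set from an on-disk superblock: two
 * stacked closures (cl for the lifetime of the object, caching for the
 * time it's actively serving IO), the kobjects, locks and lists, and all
 * the mempools, biosets and caches the set needs. On any allocation
 * failure the partially built set is torn down through the normal
 * unregister path.
 */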
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages		= bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit	= 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

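/*
 * Bring a complete cache set online. There are two paths: if the set was
 * shut down cleanly (CACHE_SYNC is set), read the journal, the priorities
 * and the uuid array, check the btree and replay the journal; otherwise
 * the existing contents are invalidated and a fresh journal, uuid bucket
 * and btree root are written out. Either way we finish by starting the gc
 * thread, writing the superblock and attaching any waiting backing
 * devices and flash volumes.
 */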
1542 static void run_cache_set(struct cache_set *c)
1543 {
1544         const char *err = "cannot allocate memory";
1545         struct cached_dev *dc, *t;
1546         struct cache *ca;
1547         struct closure cl;
1548         unsigned i;
1549
1550         closure_init_stack(&cl);
1551
1552         for_each_cache(ca, c, i)
1553                 c->nbuckets += ca->sb.nbuckets;
1554
1555         if (CACHE_SYNC(&c->sb)) {
1556                 LIST_HEAD(journal);
1557                 struct bkey *k;
1558                 struct jset *j;
1559
1560                 err = "cannot allocate memory for journal";
1561                 if (bch_journal_read(c, &journal))
1562                         goto err;
1563
1564                 pr_debug("btree_journal_read() done");
1565
1566                 err = "no journal entries found";
1567                 if (list_empty(&journal))
1568                         goto err;
1569
1570                 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1571
1572                 err = "IO error reading priorities";
1573                 for_each_cache(ca, c, i)
1574                         prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
1575
1576                 /*
1577                  * If prio_read() fails it'll call cache_set_error and we'll
1578                  * tear everything down right away, but if we perhaps checked
1579                  * sooner we could avoid journal replay.
1580                  */
1581
1582                 k = &j->btree_root;
1583
1584                 err = "bad btree root";
1585                 if (__bch_btree_ptr_invalid(c, k))
1586                         goto err;
1587
1588                 err = "error reading btree root";
1589                 c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
1590                 if (IS_ERR_OR_NULL(c->root))
1591                         goto err;
1592
1593                 list_del_init(&c->root->list);
1594                 rw_unlock(true, c->root);
1595
1596                 err = uuid_read(c, j, &cl);
1597                 if (err)
1598                         goto err;
1599
1600                 err = "error in recovery";
1601                 if (bch_btree_check(c))
1602                         goto err;
1603
1604                 bch_journal_mark(c, &journal);
1605                 bch_initial_gc_finish(c);
1606                 pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, NULL, 0);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

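/*
 * A cache may only join a set whose superblock geometry - block size,
 * bucket size and member count - matches its own.
 */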
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}

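/*
 * Attach a cache device to the set named by its set_uuid, allocating
 * the cache_set (and its kobjects) if this is the first member we've
 * seen. Once every member listed in the superblock has shown up, the
 * whole set is started via run_cache_set(). Returns NULL on success or
 * an error string for the caller to print. Called under
 * bch_register_lock.
 */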
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags		= ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

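/*
 * kobject release hook for struct cache: the inverse of cache_alloc()
 * and register_cache(). The explicit NULL/IS_ERR checks let it safely
 * tear down a cache that was only partially set up before registration
 * failed.
 */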
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set)
		ca->set->cache[ca->sb.nr_this_dev] = NULL;

	bio_split_pool_free(&ca->bio_split_hook);

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev)) {
		blk_sync_queue(bdev_get_queue(ca->bdev));
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(ca);
	module_put(THIS_MODULE);
}

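/*
 * Allocate the in-memory state for a cache device. The reserve fifos
 * are sized off the bucket count: "free" works out to roughly
 * nbuckets / 1024 (after rounding up to a power of two), with free_inc
 * and the heap scaled up 4x and 8x from that. The sb argument is
 * currently unused - the caller has already copied the superblock into
 * ca->sb. On failure the caller's kobject_put() runs
 * bch_cache_release() to free whatever was allocated.
 */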
static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
	    !(ca->buckets	= vzalloc(sizeof(struct bucket) *
					  ca->sb.nbuckets)) ||
	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					  2, GFP_KERNEL)) ||
	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

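/*
 * Final step of registering a cache device: claim the block device,
 * keep a reference to the superblock page for later rewrites, allocate
 * in-memory state and hand the cache over to register_cache_set().
 * On failure the kobject_put() drops the module/kobject refs taken in
 * cache_alloc() and triggers bch_cache_release().
 */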
static void register_cache(struct cache_sb *sb, struct page *sb_page,
			   struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs	= 1;
	ca->sb_bio.bi_io_vec	= ca->sb_bio.bi_inline_vecs;
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	if (cache_alloc(sb, ca) != 0)
		goto err;

	err = "error creating kobject";
	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
		goto err;

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err)
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	kobject_put(&ca->kobj);
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

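/*
 * Helpers for register_bcache() to tell "already registered" apart
 * from a genuine -EBUSY: scan every known cache and backing device for
 * the block_device we just failed to open exclusively.
 */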
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

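/*
 * Store hook for /sys/fs/bcache/register (and register_quiet, which
 * differs only in staying silent on errors). Userspace registers a
 * formatted device by writing its path, e.g.:
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * The superblock decides what the device becomes: SB_IS_BDEV() means a
 * backing device, anything else is registered as a cache.
 */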
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			/* drop the reference lookup_bdev() took */
			if (!IS_ERR(bdev))
				bdput(bdev);
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		/* err is NULL after a successful read_super() */
		err = "cannot allocate memory";
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		err = "cannot allocate memory";
		if (!ca)
			goto err_close;

		register_cache(sb, sb_page, bdev, ca);
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	if (attr != &ksysfs_register_quiet)
		pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

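/*
 * Reboot notifier: on shutdown, stop every cache set and backing
 * device, then wait up to two seconds for the asynchronous teardown to
 * finish so shutdown doesn't proceed while it's still in flight.
 */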
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

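/*
 * Module teardown, also used by bcache_init() to unwind a partial
 * setup: each resource is torn down only if it was actually created.
 */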
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

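/*
 * Module init: register the reboot notifier and a block major, then
 * create the shared workqueue and the /sys/fs/bcache kobject with its
 * register files, and bring up the request and debug code. Any failure
 * after the major is allocated funnels through bcache_exit().
 */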
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		/* don't leave the reboot notifier registered on failure */
		unregister_reboot_notifier(&reboot);
		return bcache_major;
	}

	if (!(bcache_wq = create_workqueue("bcache")) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);