2 * Main bcache entry point - handle a read or a write request and decide what to
3 * do with it; the make_request functions are called by the block layer.
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
13 #include "writeback.h"
15 #include <linux/module.h>
16 #include <linux/hash.h>
17 #include <linux/random.h>
19 #include <trace/events/bcache.h>
21 #define CUTOFF_CACHE_ADD 95
22 #define CUTOFF_CACHE_READA 90
24 struct kmem_cache *bch_search_cache;
26 static void bch_data_insert_start(struct closure *);
28 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
30 return BDEV_CACHE_MODE(&dc->sb);
33 static bool verify(struct cached_dev *dc, struct bio *bio)
38 static void bio_csum(struct bio *bio, struct bkey *k)
41 struct bvec_iter iter;
44 bio_for_each_segment(bv, bio, iter) {
45 void *d = kmap(bv.bv_page) + bv.bv_offset;
46 csum = bch_crc64_update(csum, d, bv.bv_len);
50 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
53 /* Insert data into cache */
55 static void bch_data_insert_keys(struct closure *cl)
57 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
58 atomic_t *journal_ref = NULL;
59 struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
63 * If we're looping, might already be waiting on
64 * another journal write - can't wait on more than one journal write at a time.
67 * XXX: this looks wrong
70 while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
75 journal_ref = bch_journal(op->c, &op->insert_keys,
76 op->flush_journal ? cl : NULL);
78 ret = bch_btree_insert(op->c, &op->insert_keys,
79 journal_ref, replace_key);
81 op->replace_collision = true;
84 op->insert_data_done = true;
88 atomic_dec_bug(journal_ref);
90 if (!op->insert_data_done)
91 continue_at(cl, bch_data_insert_start, op->wq);
93 bch_keylist_free(&op->insert_keys);
97 static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
100 size_t oldsize = bch_keylist_nkeys(l);
101 size_t newsize = oldsize + u64s;
104 * The journalling code doesn't handle the case where the keys to insert
105 * are bigger than an empty write: if we just return -ENOMEM here,
106 * bch_data_insert_start() and bch_data_invalidate() will insert the keys created so far
107 * and finish the rest when the keylist is empty.
109 if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
112 return __bch_keylist_realloc(l, u64s);
115 static void bch_data_invalidate(struct closure *cl)
117 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
118 struct bio *bio = op->bio;
120 pr_debug("invalidating %u sectors from %llu",
121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
123 while (bio_sectors(bio)) {
124 unsigned sectors = min(bio_sectors(bio),
125 1U << (KEY_SIZE_BITS - 1));
127 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
130 bio->bi_iter.bi_sector += sectors;
131 bio->bi_iter.bi_size -= sectors << 9;
133 bch_keylist_add(&op->insert_keys,
134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
137 op->insert_data_done = true;
140 continue_at(cl, bch_data_insert_keys, op->wq);
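/*
 * Illustrative example: a bcache bkey's offset names the end of its extent
 * and KEY_SIZE() sectors precede it (KEY_START(k) == KEY_OFFSET(k) -
 * KEY_SIZE(k)), which is why bi_sector is advanced before the key is built
 * above. Invalidating 8 sectors starting at sector 100 therefore adds
 * KEY(inode, 108, 8), covering sectors [100, 108).
 */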
143 static void bch_data_insert_error(struct closure *cl)
145 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
148 * Our data write just errored, which means we've got a bunch of keys to
149 * insert that point to data that wasn't successfully written.
151 * We don't have to insert those keys but we still have to invalidate
152 * that region of the cache - so, if we just strip off all the pointers
153 * from the keys we'll accomplish just that.
156 struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
158 while (src != op->insert_keys.top) {
159 struct bkey *n = bkey_next(src);
161 SET_KEY_PTRS(src, 0);
162 memmove(dst, src, bkey_bytes(src));
164 dst = bkey_next(dst);
168 op->insert_keys.top = dst;
170 bch_data_insert_keys(cl);
173 static void bch_data_insert_endio(struct bio *bio, int error)
175 struct closure *cl = bio->bi_private;
176 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
179 /* TODO: We could try to recover from this. */
182 else if (!op->replace)
183 set_closure_fn(cl, bch_data_insert_error, op->wq);
185 set_closure_fn(cl, NULL, NULL);
188 bch_bbio_endio(op->c, bio, error, "writing data to cache");
191 static void bch_data_insert_start(struct closure *cl)
193 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
194 struct bio *bio = op->bio, *n;
196 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
197 set_gc_sectors(op->c);
202 return bch_data_invalidate(cl);
205 * Journal writes are marked REQ_FLUSH; if the original write was a
206 * flush, it'll wait on the journal write.
208 bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
213 struct bio_set *split = op->c->bio_split;
215 /* 1 for the device pointer and 1 for the checksum */
216 if (bch_keylist_realloc(&op->insert_keys,
217 3 + (op->csum ? 1 : 0),
219 continue_at(cl, bch_data_insert_keys, op->wq);
221 k = op->insert_keys.top;
223 SET_KEY_INODE(k, op->inode);
224 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
226 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
227 op->write_point, op->write_prio,
231 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
233 n->bi_end_io = bch_data_insert_endio;
237 SET_KEY_DIRTY(k, true);
239 for (i = 0; i < KEY_PTRS(k); i++)
240 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
244 SET_KEY_CSUM(k, op->csum);
248 trace_bcache_cache_insert(k);
249 bch_keylist_push(&op->insert_keys);
251 n->bi_rw |= REQ_WRITE;
252 bch_submit_bbio(n, op->c, k, 0);
255 op->insert_data_done = true;
256 continue_at(cl, bch_data_insert_keys, op->wq);
258 /* bch_alloc_sectors() blocks if op->writeback is true */
259 BUG_ON(op->writeback);
262 * But if it's not a writeback write we'd rather just bail out if
263 * there aren't any buckets ready to write to - it might take a while and
264 * we might be starving btree writes for gc or something.
269 * Writethrough write: We can't complete the write until we've
270 * updated the index. But we don't want to delay the write while
271 * we wait for buckets to be freed up, so just invalidate the rest of the write.
275 return bch_data_invalidate(cl);
278 * From a cache miss, we can just insert the keys for the data
279 * we have written or bail out if we didn't do anything.
281 op->insert_data_done = true;
284 if (!bch_keylist_empty(&op->insert_keys))
285 continue_at(cl, bch_data_insert_keys, op->wq);
292 * bch_data_insert - stick some data in the cache
294 * This is the starting point for any data to end up in a cache device; it could
295 * be from a normal write, or a writeback write, or a write to a flash only
296 * volume - it's also used by the moving garbage collector to compact data in
297 * mostly empty buckets.
299 * It first writes the data to the cache, creating a list of keys to be inserted
300 * (if the data had to be fragmented there will be multiple keys); after the
301 * data is written it calls bch_journal, and after the keys have been added to
302 * the next journal write they're inserted into the btree.
304 * It inserts the data in op->bio; bi_sector is used for the key offset,
305 * and op->inode is used for the key inode.
307 * If op->bypass is true, instead of inserting the data it invalidates the
308 * region of the cache represented by op->bio and op->inode.
310 void bch_data_insert(struct closure *cl)
312 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
314 trace_bcache_write(op->c, op->inode, op->bio,
315 op->writeback, op->bypass);
317 bch_keylist_init(&op->insert_keys);
319 bch_data_insert_start(cl);
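/*
 * A minimal caller sketch (illustrative; assumes a struct data_insert_op
 * that has otherwise been zero-initialized inside a parent closure), which
 * mirrors how cached_dev_write() and flash_dev_make_request() below drive
 * this path:
 *
 *	op->c     = c;
 *	op->bio   = bio;
 *	op->inode = inode;
 *	op->wq    = bcache_wq;
 *	closure_call(&op->cl, bch_data_insert, NULL, parent_cl);
 *
 * The insert then runs asynchronously: bch_data_insert_start() writes the
 * data, bch_data_insert_keys() journals and inserts the keys, and parent_cl
 * sees the child closure complete when the whole operation is done.
 */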
324 unsigned bch_get_congested(struct cache_set *c)
329 if (!c->congested_read_threshold_us &&
330 !c->congested_write_threshold_us)
333 i = (local_clock_us() - c->congested_last_us) / 1024;
337 i += atomic_read(&c->congested);
344 i = fract_exp_two(i, 6);
346 rand = get_random_int();
347 i -= bitmap_weight(&rand, BITS_PER_LONG);
349 return i > 0 ? i : 1;
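/*
 * The return value acts as a bypass threshold: 0 means congestion tracking
 * is disabled or the set is not currently considered congested; otherwise
 * check_should_bypass() compares it against the size of the current
 * sequential stream and bypasses the cache once sectors >= congested.
 */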
352 static void add_sequential(struct task_struct *t)
354 ewma_add(t->sequential_io_avg,
355 t->sequential_io, 8, 0);
357 t->sequential_io = 0;
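/*
 * ewma_add() maintains an exponentially weighted moving average; with a
 * weight of 8 the update is roughly
 * sequential_io_avg = (7 * sequential_io_avg + sequential_io) / 8,
 * so the average tracks the recent size of this task's sequential streams
 * while decaying old history.
 */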
360 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
362 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
365 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
367 struct cache_set *c = dc->disk.c;
368 unsigned mode = cache_mode(dc, bio);
369 unsigned sectors, congested = bch_get_congested(c);
370 struct task_struct *task = current;
373 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
374 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
375 (bio->bi_rw & REQ_DISCARD))
378 if (mode == CACHE_MODE_NONE ||
379 (mode == CACHE_MODE_WRITEAROUND &&
380 (bio->bi_rw & REQ_WRITE)))
383 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
384 bio_sectors(bio) & (c->sb.block_size - 1)) {
385 pr_debug("skipping unaligned io");
389 if (bypass_torture_test(dc)) {
390 if ((get_random_int() & 3) == 3)
396 if (!congested && !dc->sequential_cutoff)
400 mode == CACHE_MODE_WRITEBACK &&
401 (bio->bi_rw & REQ_WRITE) &&
402 (bio->bi_rw & REQ_SYNC))
405 spin_lock(&dc->io_lock);
407 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
408 if (i->last == bio->bi_iter.bi_sector &&
409 time_before(jiffies, i->jiffies))
412 i = list_first_entry(&dc->io_lru, struct io, lru);
414 add_sequential(task);
417 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
418 i->sequential += bio->bi_iter.bi_size;
420 i->last = bio_end_sector(bio);
421 i->jiffies = jiffies + msecs_to_jiffies(5000);
422 task->sequential_io = i->sequential;
425 hlist_add_head(&i->hash, iohash(dc, i->last));
426 list_move_tail(&i->lru, &dc->io_lru);
428 spin_unlock(&dc->io_lock);
430 sectors = max(task->sequential_io,
431 task->sequential_io_avg) >> 9;
433 if (dc->sequential_cutoff &&
434 sectors >= dc->sequential_cutoff >> 9) {
435 trace_bcache_bypass_sequential(bio);
439 if (congested && sectors >= congested) {
440 trace_bcache_bypass_congested(bio);
445 bch_rescale_priorities(c, bio_sectors(bio));
448 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
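/*
 * Behaviour sketch (assuming the default 4 MiB sequential_cutoff): a task
 * issuing contiguous 1 MiB writes accumulates them in i->sequential, and
 * once task->sequential_io reaches 4 MiB the sectors >= cutoff test above
 * starts sending the rest of the stream straight to the backing device.
 */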
455 /* Stack frame for bio_complete */
459 struct bio *orig_bio;
460 struct bio *cache_miss;
461 struct bcache_device *d;
463 unsigned insert_bio_sectors;
464 unsigned recoverable:1;
466 unsigned read_dirty_data:1;
468 unsigned long start_time;
471 struct data_insert_op iop;
474 static void bch_cache_read_endio(struct bio *bio, int error)
476 struct bbio *b = container_of(bio, struct bbio, bio);
477 struct closure *cl = bio->bi_private;
478 struct search *s = container_of(cl, struct search, cl);
481 * If the bucket was reused while our bio was in flight, we might have
482 * read the wrong data. Set s->iop.error but not the bio's error so it doesn't get
483 * counted against the cache device, but we'll still reread the data
484 * from the backing device.
488 s->iop.error = error;
489 else if (!KEY_DIRTY(&b->key) &&
490 ptr_stale(s->iop.c, &b->key, 0)) {
491 atomic_long_inc(&s->iop.c->cache_read_races);
492 s->iop.error = -EINTR;
495 bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
499 * Read from a single key, handling the initial cache miss if the key starts in
500 * the middle of the bio
502 static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
504 struct search *s = container_of(op, struct search, op);
505 struct bio *n, *bio = &s->bio.bio;
506 struct bkey *bio_key;
509 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
512 if (KEY_INODE(k) != s->iop.inode ||
513 KEY_START(k) > bio->bi_iter.bi_sector) {
514 unsigned bio_sectors = bio_sectors(bio);
515 unsigned sectors = KEY_INODE(k) == s->iop.inode
516 ? min_t(uint64_t, INT_MAX,
517 KEY_START(k) - bio->bi_iter.bi_sector)
520 int ret = s->d->cache_miss(b, s, bio, sectors);
521 if (ret != MAP_CONTINUE)
524 /* if this was a complete miss we shouldn't get here */
525 BUG_ON(bio_sectors <= sectors);
531 /* XXX: figure out best pointer - for multiple cache devices */
534 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
537 s->read_dirty_data = true;
539 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
540 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
541 GFP_NOIO, s->d->bio_split);
543 bio_key = &container_of(n, struct bbio, bio)->key;
544 bch_bkey_copy_single_ptr(bio_key, k, ptr);
546 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
547 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
549 n->bi_end_io = bch_cache_read_endio;
550 n->bi_private = &s->cl;
553 * The bucket we're reading from might be reused while our bio
554 * is in flight, and we could then end up reading the wrong data.
557 * We guard against this by checking (in bch_cache_read_endio()) if
558 * the pointer is stale again; if so, we treat it as an error
559 * and reread from the backing device (but we don't pass that
560 * error up anywhere).
563 __bch_submit_bbio(n, b->c);
564 return n == bio ? MAP_DONE : MAP_CONTINUE;
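/*
 * Returning MAP_DONE tells bch_btree_map_keys() that the whole bio has been
 * submitted and the walk can stop; MAP_CONTINUE asks it to call back with
 * the next key so the rest of the bio gets handled.
 */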
567 static void cache_lookup(struct closure *cl)
569 struct search *s = container_of(cl, struct search, iop.cl);
570 struct bio *bio = &s->bio.bio;
573 bch_btree_op_init(&s->op, -1);
575 ret = bch_btree_map_keys(&s->op, s->iop.c,
576 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
577 cache_lookup_fn, MAP_END_KEY);
579 continue_at(cl, cache_lookup, bcache_wq);
584 /* Common code for the make_request functions */
586 static void request_endio(struct bio *bio, int error)
588 struct closure *cl = bio->bi_private;
591 struct search *s = container_of(cl, struct search, cl);
592 s->iop.error = error;
593 /* Only cache read errors are recoverable */
594 s->recoverable = false;
601 static void bio_complete(struct search *s)
604 int cpu, rw = bio_data_dir(s->orig_bio);
605 unsigned long duration = jiffies - s->start_time;
607 cpu = part_stat_lock();
608 part_round_stats(cpu, &s->d->disk->part0);
609 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
612 trace_bcache_request_end(s->d, s->orig_bio);
613 bio_endio(s->orig_bio, s->iop.error);
618 static void do_bio_hook(struct search *s, struct bio *orig_bio)
620 struct bio *bio = &s->bio.bio;
623 __bio_clone_fast(bio, orig_bio);
624 bio->bi_end_io = request_endio;
625 bio->bi_private = &s->cl;
627 atomic_set(&bio->bi_cnt, 3);
630 static void search_free(struct closure *cl)
632 struct search *s = container_of(cl, struct search, cl);
638 closure_debug_destroy(cl);
639 mempool_free(s, s->d->c->search);
642 static inline struct search *search_alloc(struct bio *bio,
643 struct bcache_device *d)
647 s = mempool_alloc(d->c->search, GFP_NOIO);
649 closure_init(&s->cl, NULL);
653 s->cache_miss = NULL;
656 s->write = (bio->bi_rw & REQ_WRITE) != 0;
657 s->read_dirty_data = 0;
658 s->start_time = jiffies;
662 s->iop.inode = d->id;
663 s->iop.write_point = hash_long((unsigned long) current, 16);
664 s->iop.write_prio = 0;
667 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
668 s->iop.wq = bcache_wq;
675 static void cached_dev_bio_complete(struct closure *cl)
677 struct search *s = container_of(cl, struct search, cl);
678 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
686 static void cached_dev_cache_miss_done(struct closure *cl)
688 struct search *s = container_of(cl, struct search, cl);
690 if (s->iop.replace_collision)
691 bch_mark_cache_miss_collision(s->iop.c, s->d);
697 bio_for_each_segment_all(bv, s->iop.bio, i)
698 __free_page(bv->bv_page);
701 cached_dev_bio_complete(cl);
704 static void cached_dev_read_error(struct closure *cl)
706 struct search *s = container_of(cl, struct search, cl);
707 struct bio *bio = &s->bio.bio;
709 if (s->recoverable) {
710 /* Retry from the backing device: */
711 trace_bcache_read_retry(s->orig_bio);
714 do_bio_hook(s, s->orig_bio);
716 /* XXX: invalidate cache */
718 closure_bio_submit(bio, cl, s->d);
721 continue_at(cl, cached_dev_cache_miss_done, NULL);
724 static void cached_dev_read_done(struct closure *cl)
726 struct search *s = container_of(cl, struct search, cl);
727 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
730 * We had a cache miss; cache_bio now contains data ready to be inserted into the cache.
733 * First, we copy the data we just read from cache_bio's bounce buffers
734 * to the buffers the original bio pointed to:
738 bio_reset(s->iop.bio);
739 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
740 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
741 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
742 bch_bio_map(s->iop.bio, NULL);
744 bio_copy_data(s->cache_miss, s->iop.bio);
746 bio_put(s->cache_miss);
747 s->cache_miss = NULL;
750 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
751 bch_data_verify(dc, s->orig_bio);
756 !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
757 BUG_ON(!s->iop.replace);
758 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
761 continue_at(cl, cached_dev_cache_miss_done, NULL);
764 static void cached_dev_read_done_bh(struct closure *cl)
766 struct search *s = container_of(cl, struct search, cl);
767 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
769 bch_mark_cache_accounting(s->iop.c, s->d,
770 !s->cache_miss, s->iop.bypass);
771 trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
774 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
775 else if (s->iop.bio || verify(dc, &s->bio.bio))
776 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
778 continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
781 static int cached_dev_cache_miss(struct btree *b, struct search *s,
782 struct bio *bio, unsigned sectors)
784 int ret = MAP_CONTINUE;
786 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
787 struct bio *miss, *cache_bio;
789 if (s->cache_miss || s->iop.bypass) {
790 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
791 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
795 if (!(bio->bi_rw & REQ_RAHEAD) &&
796 !(bio->bi_rw & REQ_META) &&
797 s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
798 reada = min_t(sector_t, dc->readahead >> 9,
799 bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
801 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
803 s->iop.replace_key = KEY(s->iop.inode,
804 bio->bi_iter.bi_sector + s->insert_bio_sectors,
805 s->insert_bio_sectors);
807 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
811 s->iop.replace = true;
813 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
815 /* btree_search_recurse()'s btree iterator is no good anymore */
816 ret = miss == bio ? MAP_DONE : -EINTR;
818 cache_bio = bio_alloc_bioset(GFP_NOWAIT,
819 DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
824 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
825 cache_bio->bi_bdev = miss->bi_bdev;
826 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
828 cache_bio->bi_end_io = request_endio;
829 cache_bio->bi_private = &s->cl;
831 bch_bio_map(cache_bio, NULL);
832 if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
836 bch_mark_cache_readahead(s->iop.c, s->d);
838 s->cache_miss = miss;
839 s->iop.bio = cache_bio;
841 closure_bio_submit(cache_bio, &s->cl, s->d);
847 miss->bi_end_io = request_endio;
848 miss->bi_private = &s->cl;
849 closure_bio_submit(miss, &s->cl, s->d);
853 static void cached_dev_read(struct cached_dev *dc, struct search *s)
855 struct closure *cl = &s->cl;
857 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
858 continue_at(cl, cached_dev_read_done_bh, NULL);
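/*
 * The lookup runs as a child closure of s->cl; once it and any bios it
 * submitted complete, cached_dev_read_done_bh() decides whether to retry
 * from the backing device (cached_dev_read_error), insert/verify the data
 * we read (cached_dev_read_done), or just finish the request.
 */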
863 static void cached_dev_write_complete(struct closure *cl)
865 struct search *s = container_of(cl, struct search, cl);
866 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
868 up_read_non_owner(&dc->writeback_lock);
869 cached_dev_bio_complete(cl);
872 static void cached_dev_write(struct cached_dev *dc, struct search *s)
874 struct closure *cl = &s->cl;
875 struct bio *bio = &s->bio.bio;
876 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
877 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
879 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
881 down_read_non_owner(&dc->writeback_lock);
882 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
884 * We overlap with some dirty data undergoing background
885 * writeback; force this write to writeback
887 s->iop.bypass = false;
888 s->iop.writeback = true;
892 * Discards aren't _required_ to do anything, so skipping if
893 * check_overlapping returned true is ok.
895 * But check_overlapping drops dirty keys for which io hasn't started,
896 * so we still want to call it.
898 if (bio->bi_rw & REQ_DISCARD)
899 s->iop.bypass = true;
901 if (should_writeback(dc, s->orig_bio,
904 s->iop.bypass = false;
905 s->iop.writeback = true;
909 s->iop.bio = s->orig_bio;
912 if (!(bio->bi_rw & REQ_DISCARD) ||
913 blk_queue_discard(bdev_get_queue(dc->bdev)))
914 closure_bio_submit(bio, cl, s->d);
915 } else if (s->iop.writeback) {
916 bch_writeback_add(dc);
919 if (bio->bi_rw & REQ_FLUSH) {
920 /* Also need to send a flush to the backing device */
921 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
924 flush->bi_rw = WRITE_FLUSH;
925 flush->bi_bdev = bio->bi_bdev;
926 flush->bi_end_io = request_endio;
927 flush->bi_private = cl;
929 closure_bio_submit(flush, cl, s->d);
932 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
934 closure_bio_submit(bio, cl, s->d);
937 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
938 continue_at(cl, cached_dev_write_complete, NULL);
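/*
 * To summarize the three branches above: bypass writes go only to the
 * backing device while bch_data_insert() invalidates the stale range in the
 * cache; writeback writes go only to the cache and are marked dirty via
 * bch_writeback_add(); writethrough writes clone the bio so the data is
 * written to both the cache and the backing device.
 */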
941 static void cached_dev_nodata(struct closure *cl)
943 struct search *s = container_of(cl, struct search, cl);
944 struct bio *bio = &s->bio.bio;
946 if (s->iop.flush_journal)
947 bch_journal_meta(s->iop.c, cl);
949 /* If it's a flush, we send the flush to the backing device too */
950 closure_bio_submit(bio, cl, s->d);
952 continue_at(cl, cached_dev_bio_complete, NULL);
955 /* Cached devices - read & write stuff */
957 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
960 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
961 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
962 int cpu, rw = bio_data_dir(bio);
964 cpu = part_stat_lock();
965 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
966 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
969 bio->bi_bdev = dc->bdev;
970 bio->bi_iter.bi_sector += dc->sb.data_offset;
972 if (cached_dev_get(dc)) {
973 s = search_alloc(bio, d);
974 trace_bcache_request_start(s->d, bio);
976 if (!bio->bi_iter.bi_size) {
978 * can't call bch_journal_meta from under
979 * generic_make_request
981 continue_at_nobarrier(&s->cl,
985 s->iop.bypass = check_should_bypass(dc, bio);
988 cached_dev_write(dc, s);
990 cached_dev_read(dc, s);
993 if ((bio->bi_rw & REQ_DISCARD) &&
994 !blk_queue_discard(bdev_get_queue(dc->bdev)))
997 bch_generic_make_request(bio, &d->bio_split_hook);
1001 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1002 unsigned int cmd, unsigned long arg)
1004 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1005 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1008 static int cached_dev_congested(void *data, int bits)
1010 struct bcache_device *d = data;
1011 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1012 struct request_queue *q = bdev_get_queue(dc->bdev);
1015 if (bdi_congested(&q->backing_dev_info, bits))
1018 if (cached_dev_get(dc)) {
1022 for_each_cache(ca, d->c, i) {
1023 q = bdev_get_queue(ca->bdev);
1024 ret |= bdi_congested(&q->backing_dev_info, bits);
1033 void bch_cached_dev_request_init(struct cached_dev *dc)
1035 struct gendisk *g = dc->disk.disk;
1037 g->queue->make_request_fn = cached_dev_make_request;
1038 g->queue->backing_dev_info.congested_fn = cached_dev_congested;
1039 dc->disk.cache_miss = cached_dev_cache_miss;
1040 dc->disk.ioctl = cached_dev_ioctl;
1043 /* Flash backed devices */
1045 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1046 struct bio *bio, unsigned sectors)
1048 unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
1050 swap(bio->bi_iter.bi_size, bytes);
1052 swap(bio->bi_iter.bi_size, bytes);
1054 bio_advance(bio, bytes);
1056 if (!bio->bi_iter.bi_size)
1059 return MAP_CONTINUE;
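/*
 * A flash-only volume has no backing device to fall back to, so a cache
 * miss just leaves the missed part of the bio zero-filled; the return value
 * reports whether the bio has been fully consumed or the lookup should
 * continue with the next key.
 */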
1062 static void flash_dev_nodata(struct closure *cl)
1064 struct search *s = container_of(cl, struct search, cl);
1066 if (s->iop.flush_journal)
1067 bch_journal_meta(s->iop.c, cl);
1069 continue_at(cl, search_free, NULL);
1072 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1076 struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
1077 int cpu, rw = bio_data_dir(bio);
1079 cpu = part_stat_lock();
1080 part_stat_inc(cpu, &d->disk->part0, ios[rw]);
1081 part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
1084 s = search_alloc(bio, d);
1088 trace_bcache_request_start(s->d, bio);
1090 if (!bio->bi_iter.bi_size) {
1092 * can't call bch_journal_meta from under
1093 * generic_make_request
1095 continue_at_nobarrier(&s->cl,
1099 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1100 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1101 &KEY(d->id, bio_end_sector(bio), 0));
1103 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
1104 s->iop.writeback = true;
1107 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1109 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1112 continue_at(cl, search_free, NULL);
1115 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1116 unsigned int cmd, unsigned long arg)
1121 static int flash_dev_congested(void *data, int bits)
1123 struct bcache_device *d = data;
1124 struct request_queue *q;
1129 for_each_cache(ca, d->c, i) {
1130 q = bdev_get_queue(ca->bdev);
1131 ret |= bdi_congested(&q->backing_dev_info, bits);
1137 void bch_flash_dev_request_init(struct bcache_device *d)
1139 struct gendisk *g = d->disk;
1141 g->queue->make_request_fn = flash_dev_make_request;
1142 g->queue->backing_dev_info.congested_fn = flash_dev_congested;
1143 d->cache_miss = flash_dev_cache_miss;
1144 d->ioctl = flash_dev_ioctl;
1147 void bch_request_exit(void)
1149 if (bch_search_cache)
1150 kmem_cache_destroy(bch_search_cache);
1153 int __init bch_request_init(void)
1155 bch_search_cache = KMEM_CACHE(search, 0);
1156 if (!bch_search_cache)