/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

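/*
 * Mark the page backing map entry @a invalid in its block and clear the
 * reverse-map entry so GC no longer sees the page as live. Caller must
 * hold rrpc->rev_lock.
 */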
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
								unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->pgs_per_blk);
}

static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->pgs_per_blk;
}

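/*
 * Decompose a linear page address into the generic (channel, lun, block,
 * page, sector) format by peeling off one geometry dimension at a time:
 * the remainder is the index within the current dimension, the quotient
 * carries over to the next one.
 *
 * Worked example (hypothetical geometry, for illustration only): with
 * sec_per_pg = 4 and pgs_per_blk = 256, linear address 1031 gives
 * sec = 1031 % 4 = 3 and pg = (1031 / 4) % 256 = 1, and so on up the
 * hierarchy.
 */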
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
	if (!blk)
		return NULL;

	rblk = &rlun->blocks[blk->id];
	blk->priv = rblk;

	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	nvm_put_blk(rrpc->dev, rblk->parent);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

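/* Advance the shared round-robin counter and return the next LUN in line. */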
static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_pgs_per_blk)) < nr_pgs_per_blk) {

		/* Lock laddr */
		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;

	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto done;

	nvm_erase_blk(dev, rblk->parent);
	rrpc_put_blk(rrpc, rblk);
done:
	mempool_free(gcb, rrpc->gcb_pool);
}

/* the block with the highest number of invalid pages will be at the head
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly scan for the block with the highest number of invalid pages;
 * requires lun->lock to be held
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

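/*
 * Per-LUN GC worker. While the LUN is below its free-block watermark
 * (blks_per_lun / GC_LIMIT_INVERSE, but at least nr_luns), pick the
 * fully written block with the most invalid pages off the prio list and
 * hand it to the block GC workqueue for reclaim.
 */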
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC we don't care about the RR order; instead we want to
	 * maintain evenness across the LUNs.
	 */
	max_free = &rrpc->luns[0];
	/* prevent the GC-ing of one lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock, as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

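/*
 * Point logical address @laddr at the new physical page (@rblk, @paddr),
 * invalidating any page it previously mapped to, and keep the reverse
 * map in sync for GC lookups.
 */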
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_pages);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

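/*
 * Grab the next free page in @rblk by bumping the block's append point.
 * Returns ADDR_EMPTY if the block is already full.
 */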
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks\n");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.\n");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

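/*
 * Write completion: account each completed page against its block and,
 * once the whole block has been written (data_cmnt_size reaches
 * pgs_per_blk), queue it for GC bookkeeping via rrpc_run_gc().
 */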
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static int rrpc_end_io(struct nvm_rq *rqd, int error)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return 0;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
	if (rqd->metadata)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);

	return 0;
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

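/*
 * Prepare @rqd for submission. Multi-page requests first get a PPA list
 * from the device DMA pool; the request is then mapped through the read
 * or write path according to the bio direction.
 */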
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_pages > 1)
				nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.\n");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		if (!rlun->blocks)
			break;
		vfree(rlun->blocks);
	}
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

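/*
 * Callback for bringing up the L2P table from the device: install each
 * reported entry into the forward and reverse maps. The bounds check
 * below appears to accept U64_MAX as an in-band "unmapped" marker.
 */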
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is special: the first page of a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		addr[i].addr = pba;
		raddr[pba].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_pages);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_pages; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
							rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

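/*
 * Set up the slab caches shared by all rrpc instances (created once,
 * serialized by rrpc_lock) and the per-instance mempools for pages, GC
 * work items and requests.
 */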
static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	kfree(rrpc->luns);
}

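/*
 * Initialize per-LUN state with a 1:1 mapping onto the device LUNs in
 * [lun_begin, lun_end], allocating a shadow rrpc_block for every
 * physical block.
 */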
static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j;

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);

		if (dev->pgs_per_blk >
				MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
			pr_err("rrpc: number of pages per block too high.\n");
			goto err;
		}

		rlun = &rrpc->luns[i];
		rlun->rrpc = rrpc;
		rlun->parent = lun;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		rrpc->total_blocks += dev->blks_per_lun;
		rrpc->nr_pages += dev->sec_per_lun;

		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks)
			goto err;

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}
	}

	return 0;
err:
	return -ENOMEM;
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

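/*
 * Exposed capacity in sectors: subtract the reserved blocks (cur, gc and
 * two emergency blocks per LUN), expose 90% of the remainder and scale
 * pages to sectors via NR_PHY_IN_LOG.
 *
 * Worked example (hypothetical numbers, for illustration only): 4 LUNs
 * with 256 pages per block reserve 4 * 256 * 4 = 4096 pages; with
 * 1048576 pages in total, 1044480 remain, of which 940032 (90%, rounded
 * down by the divide) are exposed.
 */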
static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
	provisioned = rrpc->nr_pages - reserved;

	if (reserved > rrpc->nr_pages) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address from the reverse trans map and checks
 * whether it is still valid, by comparing the mapped physical address
 * with the page's own physical address. Pages that no longer match are
 * marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 paddr, pladdr;

	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
		paddr = block_to_addr(rrpc, rblk) + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_target(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_target(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");