zram: propagate error to user
[firefly-linux-kernel-4.4.55.git] / drivers / block / zram / zram_drv.c
1 /*
2  * Compressed RAM block device
3  *
4  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
5  *               2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the licence that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  *
13  */
14
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18 #ifdef CONFIG_ZRAM_DEBUG
19 #define DEBUG
20 #endif
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/bio.h>
25 #include <linux/bitops.h>
26 #include <linux/blkdev.h>
27 #include <linux/buffer_head.h>
28 #include <linux/device.h>
29 #include <linux/genhd.h>
30 #include <linux/highmem.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/vmalloc.h>
34 #include <linux/err.h>
35
36 #include "zram_drv.h"
37
38 /* Globals */
39 static int zram_major;
40 static struct zram *zram_devices;
41 static const char *default_compressor = "lzo";
42
43 /* Module params (documentation at end) */
44 static unsigned int num_devices = 1;
45
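/*
 * ZRAM_ATTR_RO(name) generates a read-only (S_IRUGO) device attribute whose
 * show() method prints the matching 64-bit counter from zram->stats.
 */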
46 #define ZRAM_ATTR_RO(name)                                              \
47 static ssize_t zram_attr_##name##_show(struct device *d,                \
48                                 struct device_attribute *attr, char *b) \
49 {                                                                       \
50         struct zram *zram = dev_to_zram(d);                             \
51         return sprintf(b, "%llu\n",                                     \
52                 (u64)atomic64_read(&zram->stats.name));                 \
53 }                                                                       \
54 static struct device_attribute dev_attr_##name =                        \
55         __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
56
57 static inline int init_done(struct zram *zram)
58 {
59         return zram->meta != NULL;
60 }
61
62 static inline struct zram *dev_to_zram(struct device *dev)
63 {
64         return (struct zram *)dev_to_disk(dev)->private_data;
65 }
66
67 static ssize_t disksize_show(struct device *dev,
68                 struct device_attribute *attr, char *buf)
69 {
70         struct zram *zram = dev_to_zram(dev);
71
72         return sprintf(buf, "%llu\n", zram->disksize);
73 }
74
75 static ssize_t initstate_show(struct device *dev,
76                 struct device_attribute *attr, char *buf)
77 {
78         u32 val;
79         struct zram *zram = dev_to_zram(dev);
80
81         down_read(&zram->init_lock);
82         val = init_done(zram);
83         up_read(&zram->init_lock);
84
85         return sprintf(buf, "%u\n", val);
86 }
87
88 static ssize_t orig_data_size_show(struct device *dev,
89                 struct device_attribute *attr, char *buf)
90 {
91         struct zram *zram = dev_to_zram(dev);
92
93         return sprintf(buf, "%llu\n",
94                 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
95 }
96
97 static ssize_t mem_used_total_show(struct device *dev,
98                 struct device_attribute *attr, char *buf)
99 {
100         u64 val = 0;
101         struct zram *zram = dev_to_zram(dev);
102         struct zram_meta *meta = zram->meta;
103
104         down_read(&zram->init_lock);
105         if (init_done(zram))
106                 val = zs_get_total_size_bytes(meta->mem_pool);
107         up_read(&zram->init_lock);
108
109         return sprintf(buf, "%llu\n", val);
110 }
111
112 static ssize_t max_comp_streams_show(struct device *dev,
113                 struct device_attribute *attr, char *buf)
114 {
115         int val;
116         struct zram *zram = dev_to_zram(dev);
117
118         down_read(&zram->init_lock);
119         val = zram->max_comp_streams;
120         up_read(&zram->init_lock);
121
122         return sprintf(buf, "%d\n", val);
123 }
124
125 static ssize_t max_comp_streams_store(struct device *dev,
126                 struct device_attribute *attr, const char *buf, size_t len)
127 {
128         int num;
129         struct zram *zram = dev_to_zram(dev);
130         int ret;
131
132         ret = kstrtoint(buf, 0, &num);
133         if (ret < 0)
134                 return ret;
135         if (num < 1)
136                 return -EINVAL;
137
138         down_write(&zram->init_lock);
139         if (init_done(zram)) {
140                 if (!zcomp_set_max_streams(zram->comp, num)) {
141                         pr_info("Cannot change max compression streams\n");
142                         ret = -EINVAL;
143                         goto out;
144                 }
145         }
146
147         zram->max_comp_streams = num;
148         ret = len;
149 out:
150         up_write(&zram->init_lock);
151         return ret;
152 }
153
154 static ssize_t comp_algorithm_show(struct device *dev,
155                 struct device_attribute *attr, char *buf)
156 {
157         size_t sz;
158         struct zram *zram = dev_to_zram(dev);
159
160         down_read(&zram->init_lock);
161         sz = zcomp_available_show(zram->compressor, buf);
162         up_read(&zram->init_lock);
163
164         return sz;
165 }
166
167 static ssize_t comp_algorithm_store(struct device *dev,
168                 struct device_attribute *attr, const char *buf, size_t len)
169 {
170         struct zram *zram = dev_to_zram(dev);
171         down_write(&zram->init_lock);
172         if (init_done(zram)) {
173                 up_write(&zram->init_lock);
174                 pr_info("Can't change algorithm for initialized device\n");
175                 return -EBUSY;
176         }
177         strlcpy(zram->compressor, buf, sizeof(zram->compressor));
178         up_write(&zram->init_lock);
179         return len;
180 }
181
183 /* flag operations need meta->tb_lock */
183 static int zram_test_flag(struct zram_meta *meta, u32 index,
184                         enum zram_pageflags flag)
185 {
186         return meta->table[index].flags & BIT(flag);
187 }
188
189 static void zram_set_flag(struct zram_meta *meta, u32 index,
190                         enum zram_pageflags flag)
191 {
192         meta->table[index].flags |= BIT(flag);
193 }
194
195 static void zram_clear_flag(struct zram_meta *meta, u32 index,
196                         enum zram_pageflags flag)
197 {
198         meta->table[index].flags &= ~BIT(flag);
199 }
200
201 static inline int is_partial_io(struct bio_vec *bvec)
202 {
203         return bvec->bv_len != PAGE_SIZE;
204 }
205
206 /*
207  * Check if request is within bounds and aligned on zram logical blocks.
208  */
209 static inline int valid_io_request(struct zram *zram, struct bio *bio)
210 {
211         u64 start, end, bound;
212
213         /* unaligned request */
214         if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
215                 return 0;
216         if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
217                 return 0;
218
219         start = bio->bi_sector;
220         end = start + (bio->bi_size >> SECTOR_SHIFT);
221         bound = zram->disksize >> SECTOR_SHIFT;
222         /* out of range */
223         if (unlikely(start >= bound || end >= bound || start > end))
224                 return 0;
225
226         /* I/O request is valid */
227         return 1;
228 }
229
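/*
 * zram_meta bundles the per-device table of zsmalloc handles and sizes, the
 * zsmalloc pool itself, and the lock protecting the table. It is allocated
 * when a disksize is set and freed again on reset.
 */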
230 static void zram_meta_free(struct zram_meta *meta)
231 {
232         zs_destroy_pool(meta->mem_pool);
233         vfree(meta->table);
234         kfree(meta);
235 }
236
237 static struct zram_meta *zram_meta_alloc(u64 disksize)
238 {
239         size_t num_pages;
240         struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
241         if (!meta)
242                 goto out;
243
244         num_pages = disksize >> PAGE_SHIFT;
245         meta->table = vzalloc(num_pages * sizeof(*meta->table));
246         if (!meta->table) {
247                 pr_err("Error allocating zram address table\n");
248                 goto free_meta;
249         }
250
251         meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
252         if (!meta->mem_pool) {
253                 pr_err("Error creating memory pool\n");
254                 goto free_table;
255         }
256
257         rwlock_init(&meta->tb_lock);
258         return meta;
259
260 free_table:
261         vfree(meta->table);
262 free_meta:
263         kfree(meta);
264         meta = NULL;
265 out:
266         return meta;
267 }
268
269 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
270 {
271         if (*offset + bvec->bv_len >= PAGE_SIZE)
272                 (*index)++;
273         *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
274 }
275
276 static int page_zero_filled(void *ptr)
277 {
278         unsigned int pos;
279         unsigned long *page;
280
281         page = (unsigned long *)ptr;
282
283         for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
284                 if (page[pos])
285                         return 0;
286         }
287
288         return 1;
289 }
290
291 static void handle_zero_page(struct bio_vec *bvec)
292 {
293         struct page *page = bvec->bv_page;
294         void *user_mem;
295
296         user_mem = kmap_atomic(page);
297         if (is_partial_io(bvec))
298                 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
299         else
300                 clear_page(user_mem);
301         kunmap_atomic(user_mem);
302
303         flush_dcache_page(page);
304 }
305
306 /* NOTE: caller should hold the write side of meta->tb_lock */
307 static void zram_free_page(struct zram *zram, size_t index)
308 {
309         struct zram_meta *meta = zram->meta;
310         unsigned long handle = meta->table[index].handle;
311
312         if (unlikely(!handle)) {
313                 /*
314                  * No memory is allocated for zero filled pages.
315                  * Simply clear zero page flag.
316                  */
317                 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
318                         zram_clear_flag(meta, index, ZRAM_ZERO);
319                         atomic64_dec(&zram->stats.zero_pages);
320                 }
321                 return;
322         }
323
324         zs_free(meta->mem_pool, handle);
325
326         atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
327         atomic64_dec(&zram->stats.pages_stored);
328
329         meta->table[index].handle = 0;
330         meta->table[index].size = 0;
331 }
332
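/*
 * Decompress the object stored at @index into the page-sized buffer @mem.
 * Unallocated and ZRAM_ZERO slots yield a cleared page; objects stored
 * uncompressed (size == PAGE_SIZE) are copied directly.
 */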
333 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
334 {
335         int ret = 0;
336         unsigned char *cmem;
337         struct zram_meta *meta = zram->meta;
338         unsigned long handle;
339         u16 size;
340
341         read_lock(&meta->tb_lock);
342         handle = meta->table[index].handle;
343         size = meta->table[index].size;
344
345         if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
346                 read_unlock(&meta->tb_lock);
347                 clear_page(mem);
348                 return 0;
349         }
350
351         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
352         if (size == PAGE_SIZE)
353                 copy_page(mem, cmem);
354         else
355                 ret = zcomp_decompress(zram->comp, cmem, size, mem);
356         zs_unmap_object(meta->mem_pool, handle);
357         read_unlock(&meta->tb_lock);
358
359         /* Should NEVER happen. Return bio error if it does. */
360         if (unlikely(ret)) {
361                 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
362                 atomic64_inc(&zram->stats.failed_reads);
363                 return ret;
364         }
365
366         return 0;
367 }
368
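/*
 * Service a read for the range described by @bvec. Zero and unallocated
 * pages are filled in place; partial reads decompress into a temporary
 * buffer first and copy out only the requested bytes.
 */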
369 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
370                           u32 index, int offset, struct bio *bio)
371 {
372         int ret;
373         struct page *page;
374         unsigned char *user_mem, *uncmem = NULL;
375         struct zram_meta *meta = zram->meta;
376         page = bvec->bv_page;
377
378         read_lock(&meta->tb_lock);
379         if (unlikely(!meta->table[index].handle) ||
380                         zram_test_flag(meta, index, ZRAM_ZERO)) {
381                 read_unlock(&meta->tb_lock);
382                 handle_zero_page(bvec);
383                 return 0;
384         }
385         read_unlock(&meta->tb_lock);
386
387         if (is_partial_io(bvec))
388                 /* Use a temporary buffer to decompress the page */
389                 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
390
391         user_mem = kmap_atomic(page);
392         if (!is_partial_io(bvec))
393                 uncmem = user_mem;
394
395         if (!uncmem) {
396                 pr_info("Unable to allocate temp memory\n");
397                 ret = -ENOMEM;
398                 goto out_cleanup;
399         }
400
401         ret = zram_decompress_page(zram, uncmem, index);
402         /* Should NEVER happen. Return bio error if it does. */
403         if (unlikely(ret))
404                 goto out_cleanup;
405
406         if (is_partial_io(bvec))
407                 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
408                                 bvec->bv_len);
409
410         flush_dcache_page(page);
411         ret = 0;
412 out_cleanup:
413         kunmap_atomic(user_mem);
414         if (is_partial_io(bvec))
415                 kfree(uncmem);
416         return ret;
417 }
418
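/*
 * Compress and store the data described by @bvec at @index. A partial write
 * first decompresses the existing page so untouched bytes survive; pages
 * found to be all zero only set ZRAM_ZERO and consume no zsmalloc memory;
 * poorly compressible pages (clen > max_zpage_size) are stored uncompressed.
 */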
419 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
420                            int offset)
421 {
422         int ret = 0;
423         size_t clen;
424         unsigned long handle;
425         struct page *page;
426         unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
427         struct zram_meta *meta = zram->meta;
428         struct zcomp_strm *zstrm;
429         bool locked = false;
430
431         page = bvec->bv_page;
432         if (is_partial_io(bvec)) {
433                 /*
434                  * This is a partial IO. We need to read the full page
435                  * before writing the changes.
436                  */
437                 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
438                 if (!uncmem) {
439                         ret = -ENOMEM;
440                         goto out;
441                 }
442                 ret = zram_decompress_page(zram, uncmem, index);
443                 if (ret)
444                         goto out;
445         }
446
447         zstrm = zcomp_strm_find(zram->comp);
448         locked = true;
449         user_mem = kmap_atomic(page);
450
451         if (is_partial_io(bvec)) {
452                 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
453                        bvec->bv_len);
454                 kunmap_atomic(user_mem);
455                 user_mem = NULL;
456         } else {
457                 uncmem = user_mem;
458         }
459
460         if (page_zero_filled(uncmem)) {
461                 kunmap_atomic(user_mem);
462                 /* Free memory associated with this sector now. */
463                 write_lock(&zram->meta->tb_lock);
464                 zram_free_page(zram, index);
465                 zram_set_flag(meta, index, ZRAM_ZERO);
466                 write_unlock(&zram->meta->tb_lock);
467
468                 atomic64_inc(&zram->stats.zero_pages);
469                 ret = 0;
470                 goto out;
471         }
472
473         ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
474         if (!is_partial_io(bvec)) {
475                 kunmap_atomic(user_mem);
476                 user_mem = NULL;
477                 uncmem = NULL;
478         }
479
480         if (unlikely(ret)) {
481                 pr_err("Compression failed! err=%d\n", ret);
482                 goto out;
483         }
484         src = zstrm->buffer;
485         if (unlikely(clen > max_zpage_size)) {
486                 clen = PAGE_SIZE;
487                 if (is_partial_io(bvec))
488                         src = uncmem;
489         }
490
491         handle = zs_malloc(meta->mem_pool, clen);
492         if (!handle) {
493                 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
494                         index, clen);
495                 ret = -ENOMEM;
496                 goto out;
497         }
498         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
499
500         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
501                 src = kmap_atomic(page);
502                 copy_page(cmem, src);
503                 kunmap_atomic(src);
504         } else {
505                 memcpy(cmem, src, clen);
506         }
507
508         zcomp_strm_release(zram->comp, zstrm);
509         locked = false;
510         zs_unmap_object(meta->mem_pool, handle);
511
512         /*
513          * Free memory associated with this sector
514          * before overwriting unused sectors.
515          */
516         write_lock(&zram->meta->tb_lock);
517         zram_free_page(zram, index);
518
519         meta->table[index].handle = handle;
520         meta->table[index].size = clen;
521         write_unlock(&zram->meta->tb_lock);
522
523         /* Update stats */
524         atomic64_add(clen, &zram->stats.compr_data_size);
525         atomic64_inc(&zram->stats.pages_stored);
526 out:
527         if (locked)
528                 zcomp_strm_release(zram->comp, zstrm);
529         if (is_partial_io(bvec))
530                 kfree(uncmem);
531         if (ret)
532                 atomic64_inc(&zram->stats.failed_writes);
533         return ret;
534 }
535
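/*
 * Dispatch one bio vector to the read or write path and account it in
 * num_reads/num_writes.
 */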
536 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
537                         int offset, struct bio *bio)
538 {
539         int ret;
540         int rw = bio_data_dir(bio);
541
542         if (rw == READ) {
543                 atomic64_inc(&zram->stats.num_reads);
544                 ret = zram_bvec_read(zram, bvec, index, offset, bio);
545         } else {
546                 atomic64_inc(&zram->stats.num_writes);
547                 ret = zram_bvec_write(zram, bvec, index, offset);
548         }
549
550         return ret;
551 }
552
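/*
 * Free every object still held by the device, destroy the compression
 * backend and the metadata, and clear the statistics. The advertised disk
 * capacity is also zeroed unless the caller (module exit) asks otherwise.
 */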
553 static void zram_reset_device(struct zram *zram, bool reset_capacity)
554 {
555         size_t index;
556         struct zram_meta *meta;
557
558         down_write(&zram->init_lock);
559         if (!init_done(zram)) {
560                 up_write(&zram->init_lock);
561                 return;
562         }
563
564         meta = zram->meta;
565         /* Free all pages that are still in this zram device */
566         for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
567                 unsigned long handle = meta->table[index].handle;
568                 if (!handle)
569                         continue;
570
571                 zs_free(meta->mem_pool, handle);
572         }
573
574         zcomp_destroy(zram->comp);
575         zram->max_comp_streams = 1;
576
577         zram_meta_free(zram->meta);
578         zram->meta = NULL;
579         /* Reset stats */
580         memset(&zram->stats, 0, sizeof(zram->stats));
581
582         zram->disksize = 0;
583         if (reset_capacity)
584                 set_capacity(zram->disk, 0);
585         up_write(&zram->init_lock);
586 }
587
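/*
 * The metadata and compression backend are allocated before init_lock is
 * taken; they are attached to the device only after re-checking that it is
 * still uninitialized, otherwise both are torn down and -EBUSY is returned.
 */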
588 static ssize_t disksize_store(struct device *dev,
589                 struct device_attribute *attr, const char *buf, size_t len)
590 {
591         u64 disksize;
592         struct zcomp *comp;
593         struct zram_meta *meta;
594         struct zram *zram = dev_to_zram(dev);
595         int err;
596
597         disksize = memparse(buf, NULL);
598         if (!disksize)
599                 return -EINVAL;
600
601         disksize = PAGE_ALIGN(disksize);
602         meta = zram_meta_alloc(disksize);
603         if (!meta)
604                 return -ENOMEM;
605
606         comp = zcomp_create(zram->compressor, zram->max_comp_streams);
607         if (IS_ERR(comp)) {
608                 pr_info("Cannot initialise %s compressing backend\n",
609                                 zram->compressor);
610                 err = PTR_ERR(comp);
611                 goto out_free_meta;
612         }
613
614         down_write(&zram->init_lock);
615         if (init_done(zram)) {
616                 pr_info("Cannot change disksize for initialized device\n");
617                 err = -EBUSY;
618                 goto out_destroy_comp;
619         }
620
621         zram->meta = meta;
622         zram->comp = comp;
623         zram->disksize = disksize;
624         set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
625         up_write(&zram->init_lock);
626         return len;
627
628 out_destroy_comp:
629         up_write(&zram->init_lock);
630         zcomp_destroy(comp);
631 out_free_meta:
632         zram_meta_free(meta);
633         return err;
634 }
635
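/*
 * Writing a non-zero value tears the device down, provided nothing still
 * holds the block device (an active mount or swap area, for example).
 */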
636 static ssize_t reset_store(struct device *dev,
637                 struct device_attribute *attr, const char *buf, size_t len)
638 {
639         int ret;
640         unsigned short do_reset;
641         struct zram *zram;
642         struct block_device *bdev;
643
644         zram = dev_to_zram(dev);
645         bdev = bdget_disk(zram->disk, 0);
646
647         if (!bdev)
648                 return -ENOMEM;
649
650         /* Do not reset an active device! */
651         if (bdev->bd_holders) {
652                 ret = -EBUSY;
653                 goto out;
654         }
655
656         ret = kstrtou16(buf, 10, &do_reset);
657         if (ret)
658                 goto out;
659
660         if (!do_reset) {
661                 ret = -EINVAL;
662                 goto out;
663         }
664
665         /* Make sure all pending I/O is finished */
666         fsync_bdev(bdev);
667         bdput(bdev);
668
669         zram_reset_device(zram, true);
670         return len;
671
672 out:
673         bdput(bdev);
674         return ret;
675 }
676
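/*
 * Process the bio segment by segment. A segment that crosses a zram page
 * boundary is split in two so that zram_bvec_rw() never has to touch more
 * than one page at a time.
 */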
677 static void __zram_make_request(struct zram *zram, struct bio *bio)
678 {
679         int i, offset;
680         u32 index;
681         struct bio_vec *bvec;
682
683
684         index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
685         offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
686
687         bio_for_each_segment(bvec, bio, i) {
688                 int max_transfer_size = PAGE_SIZE - offset;
689
690                 if (bvec->bv_len > max_transfer_size) {
691                         /*
692                          * zram_bvec_rw() can only operate on a single
693                          * zram page. Split the bio vector.
694                          */
695                         struct bio_vec bv;
696
697                         bv.bv_page = bvec->bv_page;
698                         bv.bv_len = max_transfer_size;
699                         bv.bv_offset = bvec->bv_offset;
700
701                         if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
702                                 goto out;
703
704                         bv.bv_len = bvec->bv_len - max_transfer_size;
705                         bv.bv_offset += max_transfer_size;
706                         if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
707                                 goto out;
708                 } else
709                         if (zram_bvec_rw(zram, bvec, index, offset, bio) < 0)
710                                 goto out;
711
712                 update_position(&index, &offset, bvec);
713         }
714
715         set_bit(BIO_UPTODATE, &bio->bi_flags);
716         bio_endio(bio, 0);
717         return;
718
719 out:
720         bio_io_error(bio);
721 }
722
723 /*
724  * Handler function for all zram I/O requests.
725  */
726 static void zram_make_request(struct request_queue *queue, struct bio *bio)
727 {
728         struct zram *zram = queue->queuedata;
729
730         down_read(&zram->init_lock);
731         if (unlikely(!init_done(zram)))
732                 goto error;
733
734         if (!valid_io_request(zram, bio)) {
735                 atomic64_inc(&zram->stats.invalid_io);
736                 goto error;
737         }
738
739         __zram_make_request(zram, bio);
740         up_read(&zram->init_lock);
741
742         return;
743
744 error:
745         up_read(&zram->init_lock);
746         bio_io_error(bio);
747 }
748
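/*
 * Invoked by the swap code when a swap slot on this device is freed; the
 * stale compressed copy is dropped right away rather than lingering until
 * it is overwritten.
 */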
749 static void zram_slot_free_notify(struct block_device *bdev,
750                                 unsigned long index)
751 {
752         struct zram *zram;
753         struct zram_meta *meta;
754
755         zram = bdev->bd_disk->private_data;
756         meta = zram->meta;
757
758         write_lock(&meta->tb_lock);
759         zram_free_page(zram, index);
760         write_unlock(&meta->tb_lock);
761         atomic64_inc(&zram->stats.notify_free);
762 }
763
764 static const struct block_device_operations zram_devops = {
765         .swap_slot_free_notify = zram_slot_free_notify,
766         .owner = THIS_MODULE
767 };
768
769 static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
770                 disksize_show, disksize_store);
771 static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
772 static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
773 static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
774 static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
775 static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
776                 max_comp_streams_show, max_comp_streams_store);
777 static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
778                 comp_algorithm_show, comp_algorithm_store);
779
780 ZRAM_ATTR_RO(num_reads);
781 ZRAM_ATTR_RO(num_writes);
782 ZRAM_ATTR_RO(failed_reads);
783 ZRAM_ATTR_RO(failed_writes);
784 ZRAM_ATTR_RO(invalid_io);
785 ZRAM_ATTR_RO(notify_free);
786 ZRAM_ATTR_RO(zero_pages);
787 ZRAM_ATTR_RO(compr_data_size);
788
789 static struct attribute *zram_disk_attrs[] = {
790         &dev_attr_disksize.attr,
791         &dev_attr_initstate.attr,
792         &dev_attr_reset.attr,
793         &dev_attr_num_reads.attr,
794         &dev_attr_num_writes.attr,
795         &dev_attr_failed_reads.attr,
796         &dev_attr_failed_writes.attr,
797         &dev_attr_invalid_io.attr,
798         &dev_attr_notify_free.attr,
799         &dev_attr_zero_pages.attr,
800         &dev_attr_orig_data_size.attr,
801         &dev_attr_compr_data_size.attr,
802         &dev_attr_mem_used_total.attr,
803         &dev_attr_max_comp_streams.attr,
804         &dev_attr_comp_algorithm.attr,
805         NULL,
806 };
807
808 static struct attribute_group zram_disk_attr_group = {
809         .attrs = zram_disk_attrs,
810 };
811
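/*
 * Set up the request queue, gendisk and sysfs attribute group for one
 * device. Capacity starts at zero and is only set once a disksize is
 * written through sysfs.
 */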
812 static int create_device(struct zram *zram, int device_id)
813 {
814         int ret = -ENOMEM;
815
816         init_rwsem(&zram->init_lock);
817
818         zram->queue = blk_alloc_queue(GFP_KERNEL);
819         if (!zram->queue) {
820                 pr_err("Error allocating disk queue for device %d\n",
821                         device_id);
822                 goto out;
823         }
824
825         blk_queue_make_request(zram->queue, zram_make_request);
826         zram->queue->queuedata = zram;
827
828         /* gendisk structure */
829         zram->disk = alloc_disk(1);
830         if (!zram->disk) {
831                 pr_warn("Error allocating disk structure for device %d\n",
832                         device_id);
833                 goto out_free_queue;
834         }
835
836         zram->disk->major = zram_major;
837         zram->disk->first_minor = device_id;
838         zram->disk->fops = &zram_devops;
839         zram->disk->queue = zram->queue;
840         zram->disk->private_data = zram;
841         snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
842
843         /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
844         set_capacity(zram->disk, 0);
845         /* zram devices sort of resemble non-rotational disks */
846         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
847         /*
848          * Set block sizes so that we always get PAGE_SIZE-aligned
849          * and n*PAGE_SIZE-sized I/O requests.
850          */
851         blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
852         blk_queue_logical_block_size(zram->disk->queue,
853                                         ZRAM_LOGICAL_BLOCK_SIZE);
854         blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
855         blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
856
857         add_disk(zram->disk);
858
859         ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
860                                 &zram_disk_attr_group);
861         if (ret < 0) {
862                 pr_warn("Error creating sysfs group\n");
863                 goto out_free_disk;
864         }
865         strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
866         zram->meta = NULL;
867         zram->max_comp_streams = 1;
868         return 0;
869
870 out_free_disk:
871         del_gendisk(zram->disk);
872         put_disk(zram->disk);
873 out_free_queue:
874         blk_cleanup_queue(zram->queue);
875 out:
876         return ret;
877 }
878
879 static void destroy_device(struct zram *zram)
880 {
881         sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
882                         &zram_disk_attr_group);
883
884         del_gendisk(zram->disk);
885         put_disk(zram->disk);
886
887         blk_cleanup_queue(zram->queue);
888 }
889
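/*
 * Validate num_devices, register the block major and create the devices;
 * any failure unwinds whatever was created before it.
 */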
890 static int __init zram_init(void)
891 {
892         int ret, dev_id;
893
894         if (num_devices > max_num_devices) {
895                 pr_warn("Invalid value for num_devices: %u\n",
896                                 num_devices);
897                 ret = -EINVAL;
898                 goto out;
899         }
900
901         zram_major = register_blkdev(0, "zram");
902         if (zram_major <= 0) {
903                 pr_warn("Unable to get major number\n");
904                 ret = -EBUSY;
905                 goto out;
906         }
907
908         /* Allocate the device array and initialize each one */
909         zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
910         if (!zram_devices) {
911                 ret = -ENOMEM;
912                 goto unregister;
913         }
914
915         for (dev_id = 0; dev_id < num_devices; dev_id++) {
916                 ret = create_device(&zram_devices[dev_id], dev_id);
917                 if (ret)
918                         goto free_devices;
919         }
920
921         pr_info("Created %u device(s) ...\n", num_devices);
922
923         return 0;
924
925 free_devices:
926         while (dev_id)
927                 destroy_device(&zram_devices[--dev_id]);
928         kfree(zram_devices);
929 unregister:
930         unregister_blkdev(zram_major, "zram");
931 out:
932         return ret;
933 }
934
935 static void __exit zram_exit(void)
936 {
937         int i;
938         struct zram *zram;
939
940         for (i = 0; i < num_devices; i++) {
941                 zram = &zram_devices[i];
942
943                 destroy_device(zram);
944                 /*
945                  * Shouldn't access zram->disk after destroy_device
946                  * because destroy_device already released zram->disk.
947                  */
948                 zram_reset_device(zram, false);
949         }
950
951         unregister_blkdev(zram_major, "zram");
952
953         kfree(zram_devices);
954         pr_debug("Cleanup done!\n");
955 }
956
957 module_init(zram_init);
958 module_exit(zram_exit);
959
960 module_param(num_devices, uint, 0);
961 MODULE_PARM_DESC(num_devices, "Number of zram devices");
962
963 MODULE_LICENSE("Dual BSD/GPL");
964 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
965 MODULE_DESCRIPTION("Compressed RAM Block Device");