/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
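
/*
 * Illustration (not part of the driver): for name == num_reads the macro
 * above expands to approximately the following read-only attribute:
 *
 *	static ssize_t num_reads_show(struct device *d,
 *			struct device_attribute *attr, char *b)
 *	{
 *		struct zram *zram = dev_to_zram(d);
 *		return scnprintf(b, PAGE_SIZE, "%llu\n",
 *			(u64)atomic64_read(&zram->stats.num_reads));
 *	}
 *	static DEVICE_ATTR_RO(num_reads);
 */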

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
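
/*
 * Example usage from userspace (illustrative; memparse() accepts the
 * usual K/M/G suffixes, and writing 0 disables the limit):
 *
 *	echo 256M > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit
 */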

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
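
/*
 * Example usage (illustrative): writing "0" is the only accepted input;
 * it resets the high watermark to the current zsmalloc pool size:
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */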

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
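
/*
 * Example usage (illustrative): the algorithm can only be selected
 * before the disksize is set, e.g.:
 *
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 */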

/* flag operations require the table entry's bit_spinlock (ZRAM_ACCESS) held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
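
/*
 * Layout sketch (illustrative): each table entry packs the page flags
 * above ZRAM_FLAG_SHIFT and the compressed object size below it, i.e.
 *
 *	value = (flags << ZRAM_FLAG_SHIFT) | size;
 *
 * so an entry storing size 0x32a with only ZRAM_ACCESS set reads back
 * zram_get_obj_size() == 0x32a while zram_test_flag(..., ZRAM_ACCESS)
 * is non-zero.
 */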

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
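
/*
 * Worked example (assuming PAGE_SIZE == 4096 and ZRAM_LOGICAL_BLOCK_SIZE
 * == 4096, i.e. ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a request starting
 * at sector 7 is rejected as unaligned, while one starting at sector 8
 * with size 4096 is accepted provided it ends within disksize.
 */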

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
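
/*
 * Example (illustrative): with PAGE_SIZE == 4096, a 512-byte bvec at
 * offset 3584 advances *index by one and wraps *offset back to 0; the
 * same bvec at offset 1024 leaves *index alone and moves *offset to 1536.
 */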

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold that table entry's bit_spinlock while accessing it.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
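
/*
 * Note (added for clarity): the cmpxchg loop above is a lock-free
 * "monotonic max" update. If another CPU raised max_used_pages first,
 * atomic_long_cmpxchg() returns that newer value, old_max != cur_max,
 * and we retry against it (or stop once the stored maximum already
 * covers @pages).
 */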

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
				bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	int ret;

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
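
/*
 * Worked example (assuming PAGE_SIZE == 4096): a discard of n == 8192
 * bytes at offset == 512 skips the 3584 bytes up to the next page
 * boundary, frees exactly one full page (index + 1), and leaves the
 * trailing 512 bytes untouched.
 */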

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees an up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees an up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}
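
/*
 * Typical initialization sequence from userspace (illustrative, per
 * Documentation/blockdev/zram.txt):
 *
 *	modprobe zram num_devices=1
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */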

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}
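
/*
 * Example (illustrative): a device must be idle (no holders, e.g. after
 * swapoff or umount) before it can be reset and reconfigured:
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */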

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		return -EINVAL;
	}

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram))) {
		err = -EIO;
		goto out_unlock;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
out_unlock:
	up_read(&zram->init_lock);
	/*
	 * If I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. The upper layers of rw_page (e.g.
	 * swap_readpage, __swap_writepage) then resubmit the I/O as a bio
	 * request, and bio->bi_end_io handles the error (e.g. SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE-sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");