/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
static int zram_major;
struct zram *zram_devices;
/* Module params (documentation at end) */
static unsigned int num_devices;
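/*
 * Statistics helpers. The 32-bit counters are updated with plain,
 * unlocked arithmetic; the 64-bit counters are serialized with
 * stat64_lock so that readers on 32-bit architectures never see a
 * torn value.
 */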
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
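/*
 * Per-entry flag helpers: each table entry keeps its state bits
 * (e.g. ZRAM_ZERO, ZRAM_UNCOMPRESSED) in table[index].flags.
 */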
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}
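/*
 * Scan a page one word at a time; returns 1 only if every word is
 * zero. Pages detected as zero-filled are recorded with ZRAM_ZERO
 * and consume no allocator memory at all.
 */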
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
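/*
 * Release whatever backs a single page-sized slot and update stats.
 * Incompressible pages were stored verbatim in a page from
 * alloc_page(); everything else lives in the zsmalloc pool.
 */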
static void zram_free_page(struct zram *zram, size_t index)
{
	void *handle = zram->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		__free_page(handle);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	zs_free(zram->mem_pool, handle);

	if (zram->table[index].size <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].handle = NULL;
	zram->table[index].size = 0;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}
static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].handle, KM_USER1);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
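/*
 * Read one bio vector from the device. Zero-filled and never-written
 * slots are satisfied by zeroing the caller's buffer, incompressible
 * slots are copied verbatim, and everything else is LZO-decompressed.
 * Partial (sub-page) reads decompress into a temporary buffer first,
 * then copy out just the requested range.
 */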
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].handle)) {
		pr_debug("Read before write: sector=%lu, size=%u\n",
			(ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				zram->table[index].size,
				uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);
		kfree(uncmem);
	}

	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}
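/*
 * Decompress an entire slot into 'mem' so that a sub-page write can
 * be merged into it: the read half of the read-modify-write cycle
 * that partial I/O requires.
 */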
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].handle) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	/*
	 * Page is stored uncompressed since it's incompressible; the
	 * handle is then a struct page pointer, not a zsmalloc handle,
	 * so map it with kmap_atomic() rather than zs_map_object().
	 */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		cmem = kmap_atomic(zram->table[index].handle, KM_USER0);
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				zram->table[index].size,
				mem, &clen);
	zs_unmap_object(zram->mem_pool, zram->table[index].handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
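/*
 * Write one bio vector: merge partial writes into the old page data,
 * free the slot being overwritten, short-circuit zero-filled pages,
 * then LZO-compress. Pages that compress poorly (clen > max_zpage_size)
 * are stored uncompressed in their own page rather than in the pool.
 */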
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret;
	size_t clen;
	void *handle;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page, KM_USER0);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem, KM_USER0);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors which has side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		handle = page_store;
		src = kmap_atomic(page, KM_USER0);
		cmem = kmap_atomic(page_store, KM_USER1);
		goto memstore;
	}

	handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(zram->mem_pool, handle);

memstore:
	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}

	memcpy(cmem, src, clen);

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		kunmap_atomic(cmem, KM_USER1);
		kunmap_atomic(src, KM_USER0);
	} else
		zs_unmap_object(zram->mem_pool, handle);

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}
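/*
 * Advance (index, offset) past the segment just processed; index moves
 * to the next zram page whenever the segment reaches a page boundary.
 */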
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
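/*
 * Walk every segment of the bio, mapping sectors to (page index,
 * offset) pairs. A segment that straddles a zram page boundary is
 * split in two, since zram_bvec_rw() operates on one page at a time.
 */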
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i;
	u32 index;
	int offset;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
				< 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}
void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		void *handle = zram->table[index].handle;
		if (!handle)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(handle);
		else
			zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}
void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
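/*
 * Called by the swap subsystem when a swap slot on this device is
 * freed, so the compressed copy can be released immediately instead
 * of lingering until the slot is overwritten.
 */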
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}
unsigned int zram_get_num_devices(void)
{
	return num_devices;
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
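/*
 * Typical setup from userspace (a sketch; the disksize attribute is
 * provided by the zram sysfs group, see the comment in create_device()):
 *
 *   modprobe zram num_devices=4
 *   echo 268435456 > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0
 */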
module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");