#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/lzo.h>
-#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
mem_used = xv_get_total_size_bytes(rzs->mem_pool)
+ (rs->pages_expand << PAGE_SHIFT);
- succ_writes = rs->num_writes - rs->failed_writes;
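+ /* Snapshot the 64-bit counters via locked reads; a plain u64 read can tear on 32-bit systems. */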
+ succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
+ rzs_stat64_read(rzs, &rs->failed_writes);
if (succ_writes && rs->pages_stored) {
good_compress_perc = rs->good_compress * 100
/ rs->pages_stored;
}
- s->num_reads = rs->num_reads;
- s->num_writes = rs->num_writes;
- s->failed_reads = rs->failed_reads;
- s->failed_writes = rs->failed_writes;
- s->invalid_io = rs->invalid_io;
+ s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
+ s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
+ s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
+ s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
+ s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
+ s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
s->pages_zero = rs->pages_zero;
s->good_compress_pct = good_compress_perc;
s->compr_data_size = rs->compr_size;
s->mem_used_total = mem_used;
- s->bdev_num_reads = rs->bdev_num_reads;
- s->bdev_num_writes = rs->bdev_num_writes;
+ s->bdev_num_reads = rzs_stat64_read(rzs, &rs->bdev_num_reads);
+ s->bdev_num_writes = rzs_stat64_read(rzs, &rs->bdev_num_writes);
}
#endif /* CONFIG_RAMZSWAP_STATS */
}
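(Context for the hunks above: they sit in the driver's stats ioctl handler,
which fills a ramzswap_ioctl_stats structure "s" from the live counters. A
minimal sketch of the assumed local declarations, so that "rs" and "rzs" read
clearly; the names and types are inferred from usage, not quoted from the patch:)

struct ramzswap_stats *rs = &rzs->stats; /* shorthand for the device's stats */
u64 succ_writes, mem_used;
u64 good_compress_perc = 0;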
if (unlikely(!page)) {
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
rzs_clear_flag(rzs, index, RZS_ZERO);
- stat_dec(rzs->stats.pages_zero);
+ rzs_stat_dec(&rzs->stats.pages_zero);
}
return;
}
clen = PAGE_SIZE;
__free_page(page);
rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
- stat_dec(rzs->stats.pages_expand);
+ rzs_stat_dec(&rzs->stats.pages_expand);
goto out;
}
xv_free(rzs->mem_pool, page, offset);
if (clen <= PAGE_SIZE / 2)
- stat_dec(rzs->stats.good_compress);
+ rzs_stat_dec(&rzs->stats.good_compress);
out:
rzs->stats.compr_size -= clen;
- stat_dec(rzs->stats.pages_stored);
+ rzs_stat_dec(&rzs->stats.pages_stored);
rzs->table[index].page = NULL;
rzs->table[index].offset = 0;
*/
if (rzs->backing_swap) {
u32 pagenum;
- stat_dec(rzs->stats.num_reads);
- stat_inc(rzs->stats.bdev_num_reads);
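+ /* The read is forwarded to the backing swap device, so recount it as a bdev read. */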
+ rzs_stat64_dec(rzs, &rzs->stats.num_reads);
+ rzs_stat64_inc(rzs, &rzs->stats.bdev_num_reads);
bio->bi_bdev = rzs->backing_swap;
/*
struct zobj_header *zheader;
unsigned char *user_mem, *cmem;
- stat_inc(rzs->stats.num_reads);
+ rzs_stat64_inc(rzs, &rzs->stats.num_reads);
page = bio->bi_io_vec[0].bv_page;
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
if (unlikely(ret != LZO_E_OK)) {
pr_err("Decompression failed! err=%d, page=%u\n",
ret, index);
- stat_inc(rzs->stats.failed_reads);
+ rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
goto out;
}
struct page *page, *page_store;
unsigned char *user_mem, *cmem, *src;
- stat_inc(rzs->stats.num_writes);
+ rzs_stat64_inc(rzs, &rzs->stats.num_writes);
page = bio->bi_io_vec[0].bv_page;
index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
* Simply clear zero page flag.
*/
if (rzs_test_flag(rzs, index, RZS_ZERO)) {
- stat_dec(rzs->stats.pages_zero);
+ rzs_stat_dec(&rzs->stats.pages_zero);
rzs_clear_flag(rzs, index, RZS_ZERO);
}
if (page_zero_filled(user_mem)) {
kunmap_atomic(user_mem, KM_USER0);
mutex_unlock(&rzs->lock);
- stat_inc(rzs->stats.pages_zero);
+ rzs_stat_inc(&rzs->stats.pages_zero);
rzs_set_flag(rzs, index, RZS_ZERO);
set_bit(BIO_UPTODATE, &bio->bi_flags);
if (unlikely(ret != LZO_E_OK)) {
mutex_unlock(&rzs->lock);
pr_err("Compression failed! err=%d\n", ret);
- stat_inc(rzs->stats.failed_writes);
+ rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
goto out;
}
mutex_unlock(&rzs->lock);
pr_info("Error allocating memory for incompressible "
"page: %u\n", index);
- stat_inc(rzs->stats.failed_writes);
+ rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
goto out;
}
offset = 0;
rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
- stat_inc(rzs->stats.pages_expand);
+ rzs_stat_inc(&rzs->stats.pages_expand);
rzs->table[index].page = page_store;
src = kmap_atomic(page, KM_USER0);
goto memstore;
mutex_unlock(&rzs->lock);
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
- stat_inc(rzs->stats.failed_writes);
+ rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
if (rzs->backing_swap)
fwd_write_request = 1;
goto out;
/* Update stats */
rzs->stats.compr_size += clen;
- stat_inc(rzs->stats.pages_stored);
+ rzs_stat_inc(&rzs->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
- stat_inc(rzs->stats.good_compress);
+ rzs_stat_inc(&rzs->stats.good_compress);
mutex_unlock(&rzs->lock);
out:
if (fwd_write_request) {
- stat_inc(rzs->stats.bdev_num_writes);
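+ /* Write is forwarded to the backing swap device; count it as a bdev write. */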
+ rzs_stat64_inc(rzs, &rzs->stats.bdev_num_writes);
bio->bi_bdev = rzs->backing_swap;
#if 0
/*
}
if (!valid_swap_request(rzs, bio)) {
- stat_inc(rzs->stats.invalid_io);
+ rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
bio_io_error(bio);
return 0;
}
static int create_device(struct ramzswap *rzs, int device_id)
{
mutex_init(&rzs->lock);
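+ /* stat64_lock must be ready before any I/O can update the 64-bit stats. */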
+ spin_lock_init(&rzs->stat64_lock);
INIT_LIST_HEAD(&rzs->backing_swap_extent_list);
rzs->queue = blk_alloc_queue(GFP_KERNEL);
#ifndef _RAMZSWAP_DRV_H_
#define _RAMZSWAP_DRV_H_
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+
#include "ramzswap_ioctl.h"
#include "xvmalloc.h"
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
-/* Debugging and Stats */
-#if defined(CONFIG_RAMZSWAP_STATS)
-#define stat_inc(stat) ((stat)++)
-#define stat_dec(stat) ((stat)--)
-#else
-#define stat_inc(x)
-#define stat_dec(x)
-#endif
-
/* Flags for ramzswap pages (table[page_no].flags) */
enum rzs_pageflags {
/* Page is stored uncompressed */
u64 failed_reads; /* should NEVER! happen */
u64 failed_writes; /* can happen when memory is too low */
u64 invalid_io; /* non-swap I/O requests */
+ u64 notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* no. of pages with compression ratio <= 50% */
void *compress_workmem;
void *compress_buffer;
struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
struct mutex lock;
struct request_queue *queue;
struct gendisk *disk;
/*-- */
-#endif
+/* Debugging and Stats */
+#if defined(CONFIG_RAMZSWAP_STATS)
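+/*
+ * A 32-bit load/store is naturally atomic, so the u32 counters use bare
+ * updates. A u64 update can tear on 32-bit CPUs, so the 64-bit counters
+ * are updated and read under stat64_lock.
+ */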
+static inline void rzs_stat_inc(u32 *v)
+{
+ *v = *v + 1;
+}
+
+static inline void rzs_stat_dec(u32 *v)
+{
+ *v = *v - 1;
+}
+
+static inline void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
+{
+ spin_lock(&rzs->stat64_lock);
+ *v = *v + 1;
+ spin_unlock(&rzs->stat64_lock);
+}
+
+static inline void rzs_stat64_dec(struct ramzswap *rzs, u64 *v)
+{
+ spin_lock(&rzs->stat64_lock);
+ *v = *v - 1;
+ spin_unlock(&rzs->stat64_lock);
+}
+
+static inline u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
+{
+ u64 val;
+
+ spin_lock(&rzs->stat64_lock);
+ val = *v;
+ spin_unlock(&rzs->stat64_lock);
+
+ return val;
+}
+#else
+#define rzs_stat_inc(v)
+#define rzs_stat_dec(v)
+#define rzs_stat64_inc(r, v)
+#define rzs_stat64_dec(r, v)
+#define rzs_stat64_read(r, v)
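+/*
+ * Note: the no-op rzs_stat64_read() expands to nothing, so any code
+ * consuming its value must itself be compiled under
+ * CONFIG_RAMZSWAP_STATS, as the stats ioctl handler is.
+ */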
+#endif /* CONFIG_RAMZSWAP_STATS */
+#endif