block: discard granularity might not be power of 2
author	Shaohua Li <shli@kernel.org>
	Fri, 14 Dec 2012 03:15:36 +0000 (11:15 +0800)
committer	Jens Axboe <axboe@kernel.dk>
	Fri, 14 Dec 2012 19:46:04 +0000 (20:46 +0100)
In the MD RAID case, the discard granularity might not be a power of 2; for
example, a 4-disk raid5 has a discard granularity of 3*chunk_size.  Correct the
calculation for such cases.
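
As an illustration (not part of the patch): with a power-of-2 granularity,
"x & (granularity - 1)" and "x % granularity" agree, but with a raid5-style
granularity of 3*chunk_size the mask trick silently returns a wrong remainder.
The chunk size and sector below are made-up userspace values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk = 128;              /* hypothetical 64KB chunk, in sectors */
	uint64_t granularity = 3 * chunk;  /* 4-disk raid5: 3 data chunks = 384 */
	uint64_t sector = 1000;

	/* mask trick: only valid when granularity is a power of 2 */
	printf("mask   = %llu\n", (unsigned long long)(sector & (granularity - 1))); /* 360 */
	/* true remainder, what the kernel's sector_div() computes */
	printf("modulo = %llu\n", (unsigned long long)(sector % granularity));        /* 232 */
	return 0;
}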

Reported-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-lib.c
block/blk-settings.c
include/linux/blkdev.h

diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab185878baf9cb196bcd61dd7637894..5677fd33d7d2c4668cc0ce7017c87f04b5c4e6cf 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
-       unsigned int max_discard_sectors;
-       unsigned int granularity, alignment, mask;
+       sector_t max_discard_sectors;
+       sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
@@ -57,15 +57,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
-       mask = granularity - 1;
-       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+       alignment = bdev_discard_alignment(bdev) >> 9;
+       alignment = sector_div(alignment, granularity);
 
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-       max_discard_sectors = round_down(max_discard_sectors, granularity);
+       sector_div(max_discard_sectors, granularity);
+       max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
@@ -83,7 +84,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
@@ -98,10 +99,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
-               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-                       end_sect =
-                               round_down(end_sect - alignment, granularity)
-                               + alignment;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 779bb7646bcd13f871dd4ba20862722db699078c..c50ecf0ea3b17c652db8c134905de38e56713851 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        bottom = b->discard_granularity + alignment;
 
                        /* Verify that top and bottom intervals line up */
-                       if (max(top, bottom) & (min(top, bottom) - 1))
+                       if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }
 
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) &
-                       (t->discard_granularity - 1);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+                       t->discard_granularity;
        }
 
        return ret;
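
The old bit test could flag stacked limits as misaligned even when they do
line up.  In blk_stack_limits() top and bottom are granularity + alignment
sums; the sketch below assumes zero alignments, so they are just the
granularities, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int top = 768, bottom = 384;  /* 768 is an exact multiple of 384 */

	/* old power-of-2 test: 768 & 383 = 256, falsely flags a misalignment */
	printf("mask test   = %u\n", top & (bottom - 1));
	/* new test: 768 % 384 = 0, the intervals line up */
	printf("modulo test = %u\n", top % bottom);
	return 0;
}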
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c9d233e727f27286555c68085318892654202295..acb4f7bbbd32e66544793772aad9cca87e2854d9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1188,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q)
 
 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+       sector_t alignment = sector << 9;
+       alignment = sector_div(alignment, lim->discard_granularity);
 
        if (!lim->max_discard_sectors)
                return 0;
 
-       return (lim->discard_granularity + lim->discard_alignment - alignment)
-               & (lim->discard_granularity - 1);
+       alignment = lim->discard_granularity + lim->discard_alignment - alignment;
+       return sector_div(alignment, lim->discard_granularity);
 }
 
 static inline int bdev_discard_alignment(struct block_device *bdev)
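
queue_limit_discard_alignment() effectively returns, in bytes, how far the
given sector is from the next discard-aligned boundary (0 when already
aligned).  A userspace model of the new arithmetic, using the raid5-style
granularity from the commit message and illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granularity = 384ull << 9;  /* 3 chunks of 128 sectors, in bytes */
	uint64_t disc_align = 0;             /* assumed discard_alignment */
	uint64_t sector = 1000;

	uint64_t off = (sector << 9) % granularity;            /* byte offset into granule */
	uint64_t gap = (granularity + disc_align - off) % granularity;

	/* prints 77824 bytes = 152 sectors; 1000 + 152 = 1152 = 3 * 384 */
	printf("gap = %llu bytes\n", (unsigned long long)gap);
	return 0;
}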