target: Fix max_unmap_lba_count calc overflow
authorMike Christie <mchristi@redhat.com>
Fri, 3 Jun 2016 01:12:37 +0000 (20:12 -0500)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 20 Aug 2016 16:09:26 +0000 (18:09 +0200)
commit ea263c7fada4af8ec7fe5fcfd6e7d7705a89351b upstream.

max_discard_sectors is only 32 bits, and some non-SCSI backend
devices will set this to the max 0xffffffff, so we can end up
overflowing during the max_unmap_lba_count calculation.

This fixes a regression caused by my patch:

commit 8a9ebe717a133ba7bc90b06047f43cc6b8bcb8b3
Author: Mike Christie <mchristi@redhat.com>
Date:   Mon Jan 18 14:09:27 2016 -0600

    target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors

which can result in extra discards being sent due to the overflow
causing max_unmap_lba_count to be smaller than what the backing
device can actually support.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
include/target/target_core_backend.h

index 3436a83568ea378c51b41f9fe04a2932756c84a7..dcd5ed26eb1829a0a42868c1357ec03ac9396441 100644 (file)
@@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
  * in ATA and we need to set TPE=1
  */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-                                      struct request_queue *q, int block_size)
+                                      struct request_queue *q)
 {
+       int block_size = queue_logical_block_size(q);
+
        if (!blk_queue_discard(q))
                return false;
 
-       attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
-                                                               block_size;
+       attrib->max_unmap_lba_count =
+               q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
        /*
         * Currently hardcoded to 1 in Linux/SCSI code..
         */
index 75f0f08b2a34f32d0b2bfdd35fa40f027e74ae75..79291869bce6c72d9eb4a72fa6d6f5614a104aa4 100644 (file)
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
                        dev_size, div_u64(dev_size, fd_dev->fd_block_size),
                        fd_dev->fd_block_size);
 
-               if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-                                                     fd_dev->fd_block_size))
+               if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                        pr_debug("IFILE: BLOCK Discard support available,"
                                 " disabled by default\n");
                /*
index 2c53dcefff3e1517fcd70936a9a6b887c1fc3c42..4620c1dcdbc7df46d02ff0b7eeb0ee2b36a46a3b 100644 (file)
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-       if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-                                             dev->dev_attrib.hw_block_size))
+       if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");
 
index 28ee5c2e6bcd7b08abeda34669cc2da17f4dcf9a..711322a8ee35ea6a3b9e6f42ce34a59b720f1fdc 100644 (file)
@@ -96,6 +96,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
 bool target_sense_desc_format(struct se_device *dev);
 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-                                      struct request_queue *q, int block_size);
+                                      struct request_queue *q);
 
 #endif /* TARGET_CORE_BACKEND_H */