block: use sector_div for discard alignment, plug discard bios, and add blkdev_issue_write_same (used by blkdev_issue_zeroout)
[firefly-linux-kernel-4.4.55.git] / block / blk-lib.c
index 19cc761cacb2a4b71fe9d4579226025324ffddf5..b3a1f2b70b3166e011e4ef0f617ecd786b7be82b 100644 (file)
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
-       unsigned int max_discard_sectors;
-       unsigned int granularity, alignment, mask;
+       sector_t max_discard_sectors;
+       sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
+       struct blk_plug plug;
 
        if (!q)
                return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
-       mask = granularity - 1;
-       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+       alignment = bdev_discard_alignment(bdev) >> 9;
+       alignment = sector_div(alignment, granularity);
 
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-       max_discard_sectors = round_down(max_discard_sectors, granularity);
+       sector_div(max_discard_sectors, granularity);
+       max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;
 
+       blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
-               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-                       end_sect =
-                               round_down(end_sect - alignment, granularity)
-                               + alignment;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
 
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }
+       blk_finish_plug(&plug);
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
@@ -129,6 +135,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
+/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev:      target blockdev
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to write
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ * @page:      page containing data to write
+ *
+ * Description:
+ *    Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+                           sector_t nr_sects, gfp_t gfp_mask,
+                           struct page *page)
+{
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct request_queue *q = bdev_get_queue(bdev);
+       unsigned int max_write_same_sectors;
+       struct bio_batch bb;
+       struct bio *bio;
+       int ret = 0;
+
+       if (!q)
+               return -ENXIO;
+
+       max_write_same_sectors = q->limits.max_write_same_sectors;
+
+       if (max_write_same_sectors == 0)
+               return -EOPNOTSUPP;
+
+       atomic_set(&bb.done, 1);
+       bb.flags = 1 << BIO_UPTODATE;
+       bb.wait = &wait;
+
+       while (nr_sects) {
+               bio = bio_alloc(gfp_mask, 1);
+               if (!bio) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               bio->bi_sector = sector;
+               bio->bi_end_io = bio_batch_end_io;
+               bio->bi_bdev = bdev;
+               bio->bi_private = &bb;
+               bio->bi_vcnt = 1;
+               bio->bi_io_vec->bv_page = page;
+               bio->bi_io_vec->bv_offset = 0;
+               bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+
+               if (nr_sects > max_write_same_sectors) {
+                       bio->bi_size = max_write_same_sectors << 9;
+                       nr_sects -= max_write_same_sectors;
+                       sector += max_write_same_sectors;
+               } else {
+                       bio->bi_size = nr_sects << 9;
+                       nr_sects = 0;
+               }
+
+               atomic_inc(&bb.done);
+               submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
+       }
+
+       /* Wait for bios in-flight */
+       if (!atomic_dec_and_test(&bb.done))
+               wait_for_completion(&wait);
+
+       if (!test_bit(BIO_UPTODATE, &bb.flags))
+               ret = -ENOTSUPP;
+
+       return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_write_same);
+
 /**
  * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:      blockdev to issue
@@ -140,7 +220,7 @@ EXPORT_SYMBOL(blkdev_issue_discard);
  *  Generate and issue number of bios with zerofiled pages.
  */
 
-int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask)
 {
        int ret;
@@ -190,4 +270,32 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 
        return ret;
 }
+
+/**
+ * blkdev_issue_zeroout - zero-fill a block range
+ * @bdev:      blockdev to write
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to write
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *  Generate and issue a number of bios with zero-filled pages.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+                        sector_t nr_sects, gfp_t gfp_mask)
+{
+       if (bdev_write_same(bdev)) {
+               unsigned char bdn[BDEVNAME_SIZE];
+
+               if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+                                            ZERO_PAGE(0)))
+                       return 0;
+
+               bdevname(bdev, bdn);
+               pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
+       }
+
+       return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+}
 EXPORT_SYMBOL(blkdev_issue_zeroout);