diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 750550c1166fe4725efec1935d38d7eaeb4779a4..7298a5e5a183f35f81e63706fe385dd125174a8b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 
 static void free_r10bio(r10bio_t *r10_bio)
 {
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
 
        /*
         * Wake up any possible resync thread that waits for the device
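
The bulk of this patch mechanically replaces the mddev_to_conf() helper with a direct dereference of mddev->private. A minimal userspace sketch of the before/after, using simplified stand-in types rather than the real kernel structures (the macro was defined in raid10.h essentially as shown here):

#include <stdio.h>

typedef struct conf { int copies; } conf_t;      /* stand-in for raid10's conf_t */
typedef struct mddev { void *private; } mddev_t; /* stand-in for md's mddev_t    */

/* the helper being removed */
#define mddev_to_conf(mddev) ((conf_t *) (mddev)->private)

int main(void)
{
	conf_t conf = { .copies = 2 };
	mddev_t mddev = { .private = &conf };

	conf_t *old_way = mddev_to_conf(&mddev); /* before this patch */
	conf_t *new_way = mddev.private;         /* after this patch  */

	printf("%d %d\n", old_way->copies, new_way->copies);
	return 0;
}
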
@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
 
 static void put_buf(r10bio_t *r10_bio)
 {
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
 
        mempool_free(r10_bio, conf->r10buf_pool);
 
@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
 {
        unsigned long flags;
        mddev_t *mddev = r10_bio->mddev;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
 
        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
  */
 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 {
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
 
        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
                r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
        int slot, dev;
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
 
 
        slot = r10_bio->read_slot;
@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
        int slot, dev;
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
 
        for (slot = 0; slot < conf->copies; slot++)
                if (r10_bio->devs[slot].bio == bio)
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
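
The boundary check in raid10_mergeable_bvec() only changes where the chunk size comes from: chunk_sectors is now stored directly instead of being derived from a byte count. The mask-and-subtract line computes how many bytes remain in the current chunk, which only works because the chunk size is a power of two (enforced in run() further down). A worked userspace example of the same arithmetic, assuming a 64 KiB chunk:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;  /* 64 KiB = 128 x 512-byte sectors  */
	unsigned long long sector = 1000;  /* absolute start sector of the bio */
	unsigned int bio_sectors = 8;      /* 4 KiB already queued in the bio  */

	/* sectors left in this chunk after the bio, converted to bytes */
	int max = (chunk_sectors -
		   ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;

	/* 1000 % 128 = 104, so 128 - (104 + 8) = 16 sectors = 8192 bytes */
	printf("max = %d bytes\n", max);
	return 0;
}
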
@@ -596,7 +596,7 @@ rb_out:
 
 static void unplug_slaves(mddev_t *mddev)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i;
 
        rcu_read_lock();
@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
 static int raid10_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i, ret = 0;
 
        rcu_read_lock();
@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
 static int make_request(struct request_queue *q, struct bio * bio)
 {
        mddev_t *mddev = q->queuedata;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        mirror_info_t *mirror;
        r10bio_t *r10_bio;
        struct bio *read_bio;
@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 static void status(struct seq_file *seq, mddev_t *mddev)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i;
 
        if (conf->near_copies < conf->raid_disks)
-               seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
+               seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
        if (conf->near_copies > 1)
                seq_printf(seq, " %d near-copies", conf->near_copies);
        if (conf->far_copies > 1) {
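
The chunk size reported by status() stays in KiB: with 512-byte sectors, chunk_sectors * 512 / 1024 is simply chunk_sectors / 2. A one-line sanity check:

#include <assert.h>

int main(void)
{
	unsigned int chunk_sectors = 128; /* 64 KiB chunk */
	assert(chunk_sectors * 512 / 1024 == chunk_sectors / 2);
	return 0;
}
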
@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        char b[BDEVNAME_SIZE];
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
        for ( ; mirror <= last ; mirror++)
                if ( !(p=conf->mirrors+mirror)->rdev) {
 
-                       blk_queue_stack_limits(mddev->queue,
-                                              rdev->bdev->bd_disk->queue);
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
                        /* as we don't honour merge_bvec_fn, we must never risk
                         * violating it, so limit ->max_sector to one PAGE, as
                         * a one page request is never in violation.
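
disk_stack_limits() replaces blk_queue_stack_limits() so that the array's queue inherits its members' I/O topology, taking the member's data offset (rdev->data_offset is in sectors, hence << 9 for bytes) into account for alignment. A toy userspace model of the idea, with hypothetical fields only (the real logic lives in blk_stack_limits() in block/blk-settings.c):

#include <stdio.h>

struct limits { unsigned int io_min; unsigned int max_sectors; };

/* the parent keeps the most restrictive limits of its components */
static void stack(struct limits *top, const struct limits *child,
		  unsigned long long offset_bytes)
{
	if (child->max_sectors < top->max_sectors)
		top->max_sectors = child->max_sectors;
	if (child->io_min > top->io_min)
		top->io_min = child->io_min;
	(void)offset_bytes; /* the real code derives alignment_offset from this */
}

int main(void)
{
	struct limits array  = { .io_min = 512,  .max_sectors = ~0u };
	struct limits member = { .io_min = 4096, .max_sectors = 255 };

	stack(&array, &member, 0);
	printf("io_min=%u max_sectors=%u\n", array.io_min, array.max_sectors);
	return 0;
}
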
@@ -1215,7 +1215,7 @@ abort:
 static void end_sync_read(struct bio *bio, int error)
 {
        r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
-       conf_t *conf = mddev_to_conf(r10_bio->mddev);
+       conf_t *conf = r10_bio->mddev->private;
        int i,d;
 
        for (i=0; i<conf->copies; i++)
@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
        mddev_t *mddev = r10_bio->mddev;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i,d;
 
        for (i = 0; i < conf->copies; i++)
@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
  */
 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i, first;
        struct bio *tbio, *fbio;
 
@@ -1400,7 +1400,7 @@ done:
 
 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        int i, d;
        struct bio *bio, *wbio;
 
@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
        r10bio_t *r10_bio;
        struct bio *bio;
        unsigned long flags;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        int unplug=0;
        mdk_rdev_t *rdev;
@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
                spin_unlock_irqrestore(&conf->device_lock, flags);
 
                mddev = r10_bio->mddev;
-               conf = mddev_to_conf(mddev);
+               conf = mddev->private;
                if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
                        sync_request_write(mddev, r10_bio);
                        unplug = 1;
@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
 
 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
        r10bio_t *r10_bio;
        struct bio *biolist = NULL, *bio;
        sector_t max_sector, nr_sectors;
@@ -2026,7 +2026,7 @@ static sector_t
 raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
        sector_t size;
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
 
        if (!raid_disks)
                raid_disks = mddev->raid_disks;
@@ -2044,15 +2044,16 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 static int run(mddev_t *mddev)
 {
        conf_t *conf;
-       int i, disk_idx;
+       int i, disk_idx, chunk_size;
        mirror_info_t *disk;
        mdk_rdev_t *rdev;
        int nc, fc, fo;
        sector_t stride, size;
 
-       if (mddev->chunk_size < PAGE_SIZE) {
+       if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
+           !is_power_of_2(mddev->chunk_sectors)) {
                printk(KERN_ERR "md/raid10: chunk size must be "
-                      "at least PAGE_SIZE(%ld).\n", PAGE_SIZE);
+                      "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
                return -EINVAL;
        }
 
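
PAGE_SIZE >> 9 converts the page size from bytes to 512-byte sectors (8 on 4 KiB-page systems), and the new power-of-two requirement is what legitimizes the mask/shift arithmetic a few hunks down. The same check in a standalone sketch, with a local stand-in for the kernel's is_power_of_2() from linux/log2.h:

#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed 4 KiB pages */

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int chunk_sectors = 128; /* 64 KiB chunk */

	if (chunk_sectors < (PAGE_SIZE >> 9) || !is_power_of_2(chunk_sectors))
		printf("chunk size rejected\n");
	else
		printf("ok: %u >= %lu sectors and a power of 2\n",
		       chunk_sectors, PAGE_SIZE >> 9);
	return 0;
}
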
@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev)
        conf->far_copies = fc;
        conf->copies = nc*fc;
        conf->far_offset = fo;
-       conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
-       conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+       conf->chunk_mask = mddev->chunk_sectors - 1;
+       conf->chunk_shift = ffz(~mddev->chunk_sectors);
        size = mddev->dev_sectors >> conf->chunk_shift;
        sector_div(size, fc);
        size = size * conf->raid_disks;
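
ffz() returns the index of the first zero bit, so for a power of two x, ffz(~x) is the index of x's single set bit, i.e. log2(x). The old code shifted the byte-based chunk_size and subtracted 9; since chunk_size == chunk_sectors << 9, that correction now vanishes. A sketch with a portable stand-in for ffz(), valid here because the example values fit in an int:

#include <stdio.h>
#include <strings.h> /* ffs() */

/* userspace stand-in: kernel ffz() is 0-based, ffs() is 1-based */
static unsigned long ffz(unsigned long x)
{
	return ffs((int)~x) - 1;
}

int main(void)
{
	unsigned long chunk_sectors = 128;             /* 64 KiB chunk */
	unsigned long chunk_size = chunk_sectors << 9; /* bytes */

	/* the old and new chunk_shift expressions agree: both print 7 */
	printf("old=%lu new=%lu mask=%#lx\n",
	       ffz(~chunk_size) - 9, ffz(~chunk_sectors), chunk_sectors - 1);
	return 0;
}
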
@@ -2129,6 +2130,14 @@ static int run(mddev_t *mddev)
        spin_lock_init(&conf->device_lock);
        mddev->queue->queue_lock = &conf->device_lock;
 
+       chunk_size = mddev->chunk_sectors << 9;
+       blk_queue_io_min(mddev->queue, chunk_size);
+       if (conf->raid_disks % conf->near_copies)
+               blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+       else
+               blk_queue_io_opt(mddev->queue, chunk_size *
+                                (conf->raid_disks / conf->near_copies));
+
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_idx = rdev->raid_disk;
                if (disk_idx >= mddev->raid_disks
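
The new topology hints advertise one chunk as the minimum efficient I/O size (blk_queue_io_min) and a full stripe as the optimal size (blk_queue_io_opt). With n near-copies, each group of n adjacent devices stores the same data, so a stripe covers raid_disks / near_copies distinct chunks; when near_copies does not divide raid_disks evenly, the code conservatively uses chunk_size * raid_disks. The arithmetic worked through in userspace, with assumed example geometry:

#include <stdio.h>

int main(void)
{
	int raid_disks = 4, near_copies = 2;
	int chunk_size = 128 << 9; /* 64 KiB chunks, in bytes */
	int io_opt;

	if (raid_disks % near_copies)
		io_opt = chunk_size * raid_disks;
	else
		io_opt = chunk_size * (raid_disks / near_copies);

	/* 4 disks, 2 near copies -> 2 distinct chunks per stripe = 128 KiB */
	printf("io_min=%d io_opt=%d\n", chunk_size, io_opt);
	return 0;
}
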
@@ -2137,9 +2146,8 @@ static int run(mddev_t *mddev)
                disk = conf->mirrors + disk_idx;
 
                disk->rdev = rdev;
-
-               blk_queue_stack_limits(mddev->queue,
-                                      rdev->bdev->bd_disk->queue);
+               disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                 rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
@@ -2185,6 +2193,10 @@ static int run(mddev_t *mddev)
                goto out_free_conf;
        }
 
+       if (mddev->recovery_cp != MaxSector)
+               printk(KERN_NOTICE "raid10: %s is not clean"
+                      " -- starting background reconstruction\n",
+                      mdname(mddev));
        printk(KERN_INFO
                "raid10: raid set %s active with %d out of %d devices\n",
                mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2204,7 +2216,8 @@ static int run(mddev_t *mddev)
         * maybe...
         */
        {
-               int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
+               int stripe = conf->raid_disks *
+                       ((mddev->chunk_sectors << 9) / PAGE_SIZE);
                stripe /= conf->near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
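
The readahead window is kept at two full stripes so sequential reads touch every member. For instance, 4 disks with 2 near-copies and 64 KiB chunks give stripe = 4 * (65536 / 4096) / 2 = 32 pages, so ra_pages is raised to at least 64 (256 KiB with 4 KiB pages). The same computation standalone, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;   /* assumed page size */
	int raid_disks = 4, near_copies = 2;
	unsigned int chunk_sectors = 128; /* 64 KiB chunk */
	unsigned long ra_pages = 32;      /* current readahead, in pages */

	int stripe = raid_disks * ((chunk_sectors << 9) / page_size);
	stripe /= near_copies;
	if (ra_pages < 2 * stripe)
		ra_pages = 2 * stripe;

	printf("stripe=%d pages, ra_pages=%lu\n", stripe, ra_pages);
	return 0;
}
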
@@ -2227,7 +2240,7 @@ out:
 
 static int stop(mddev_t *mddev)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
 
        raise_barrier(conf, 0);
        lower_barrier(conf);
@@ -2245,7 +2258,7 @@ static int stop(mddev_t *mddev)
 
 static void raid10_quiesce(mddev_t *mddev, int state)
 {
-       conf_t *conf = mddev_to_conf(mddev);
+       conf_t *conf = mddev->private;
 
        switch(state) {
        case 1: