diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1d3fe1a40a9bfdaea27bdaeb44208443f08f2d95..a0901214aef57de00419a14c573bc128431749c7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -17,6 +17,9 @@
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "raid"
+#define        MAX_RAID_DEVICES        253 /* raid4/5/6 limit */
+
+static bool devices_handle_discard_safely = false;
 
 /*
  * The following flags are used by dm-raid.c to set up the array state.
@@ -43,25 +46,25 @@ struct raid_dev {
 };
 
 /*
- * Flags for rs->print_flags field.
+ * Flags for rs->ctr_flags field.
  */
-#define DMPF_SYNC              0x1
-#define DMPF_NOSYNC            0x2
-#define DMPF_REBUILD           0x4
-#define DMPF_DAEMON_SLEEP      0x8
-#define DMPF_MIN_RECOVERY_RATE 0x10
-#define DMPF_MAX_RECOVERY_RATE 0x20
-#define DMPF_MAX_WRITE_BEHIND  0x40
-#define DMPF_STRIPE_CACHE      0x80
-#define DMPF_REGION_SIZE       0x100
-#define DMPF_RAID10_COPIES     0x200
-#define DMPF_RAID10_FORMAT     0x400
+#define CTR_FLAG_SYNC              0x1
+#define CTR_FLAG_NOSYNC            0x2
+#define CTR_FLAG_REBUILD           0x4
+#define CTR_FLAG_DAEMON_SLEEP      0x8
+#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
+#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
+#define CTR_FLAG_MAX_WRITE_BEHIND  0x40
+#define CTR_FLAG_STRIPE_CACHE      0x80
+#define CTR_FLAG_REGION_SIZE       0x100
+#define CTR_FLAG_RAID10_COPIES     0x200
+#define CTR_FLAG_RAID10_FORMAT     0x400
 
 struct raid_set {
        struct dm_target *ti;
 
        uint32_t bitmap_loaded;
-       uint32_t print_flags;
+       uint32_t ctr_flags;
 
        struct mddev md;
        struct raid_type *raid_type;
@@ -79,6 +82,7 @@ static struct raid_type {
        const unsigned level;           /* RAID level. */
        const unsigned algorithm;       /* RAID algorithm. */
 } raid_types[] = {
+       {"raid0",    "RAID0 (striping)",                0, 2, 0, 0 /* NONE */},
        {"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
        {"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
        {"raid4",    "RAID4 (dedicated parity disk)",   1, 2, 5, ALGORITHM_PARITY_0},
@@ -117,15 +121,15 @@ static int raid10_format_to_md_layout(char *format, unsigned copies)
 {
        unsigned n = 1, f = 1;
 
-       if (!strcmp("near", format))
+       if (!strcasecmp("near", format))
                n = copies;
        else
                f = copies;
 
-       if (!strcmp("offset", format))
+       if (!strcasecmp("offset", format))
                return 0x30000 | (f << 8) | n;
 
-       if (!strcmp("far", format))
+       if (!strcasecmp("far", format))
                return 0x20000 | (f << 8) | n;
 
        return (f << 8) | n;
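[Worked values for the encoding above, taking copies == 2 ("near" sets n, the other formats set f):

    near   -> (1 << 8) | 2           == 0x0102
    far    -> 0x20000 | (2 << 8) | 1 == 0x20201
    offset -> 0x30000 | (2 << 8) | 1 == 0x30201

The low byte carries the near-copy count, the second byte the far-copy count, and the high bits select md's far/offset layout variants.]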
@@ -325,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
-                       if (min_region_size & (min_region_size - 1))
-                               region_size = 1 << fls(region_size);
+                       region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
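[The one-line replacement fixes two defects visible in the deleted code: it shifted the wrong variable (region_size, which is still zero on this default path, so 1 << fls(0) produced a bogus one-sector region size), and when min_region_size already was a power of two it assigned nothing at all. roundup_pow_of_two(), from linux/log2.h, has exactly the intended semantics:

    roundup_pow_of_two(8192);  /* == 8192, already a power of two */
    roundup_pow_of_two(9000);  /* == 16384, next power of two */
]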
@@ -380,7 +383,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned i, rebuild_cnt = 0;
-       unsigned rebuilds_per_group, copies, d;
+       unsigned rebuilds_per_group = 0, copies, d;
        unsigned group_size, last_group_start;
 
        for (i = 0; i < rs->md.raid_disks; i++)
@@ -504,7 +507,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
         * First, parse the in-order required arguments
         * "chunk_size" is the only argument of this type.
         */
-       if ((strict_strtoul(argv[0], 10, &value) < 0)) {
+       if ((kstrtoul(argv[0], 10, &value) < 0)) {
                rs->ti->error = "Bad chunk size";
                return -EINVAL;
        } else if (rs->raid_type->level == 1) {
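[strict_strtoul() was removed upstream after becoming a thin wrapper around kstrtoul(), which keeps the same strict no-trailing-garbage parsing. A minimal sketch of the contract, with a hypothetical call site:

    unsigned long value;

    if (kstrtoul("2048", 10, &value))   /* returns 0 on success,      */
            return -EINVAL;             /* -EINVAL/-ERANGE on failure */
    /* value == 2048 here */
]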
@@ -551,12 +554,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
        for (i = 0; i < num_raid_params; i++) {
                if (!strcasecmp(argv[i], "nosync")) {
                        rs->md.recovery_cp = MaxSector;
-                       rs->print_flags |= DMPF_NOSYNC;
+                       rs->ctr_flags |= CTR_FLAG_NOSYNC;
                        continue;
                }
                if (!strcasecmp(argv[i], "sync")) {
                        rs->md.recovery_cp = 0;
-                       rs->print_flags |= DMPF_SYNC;
+                       rs->ctr_flags |= CTR_FLAG_SYNC;
                        continue;
                }
 
@@ -581,11 +584,11 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                return -EINVAL;
                        }
                        raid10_format = argv[i];
-                       rs->print_flags |= DMPF_RAID10_FORMAT;
+                       rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
                        continue;
                }
 
-               if (strict_strtoul(argv[i], 10, &value) < 0) {
+               if (kstrtoul(argv[i], 10, &value) < 0) {
                        rs->ti->error = "Bad numerical argument given in raid params";
                        return -EINVAL;
                }
@@ -598,7 +601,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        }
                        clear_bit(In_sync, &rs->dev[value].rdev.flags);
                        rs->dev[value].rdev.recovery_offset = 0;
-                       rs->print_flags |= DMPF_REBUILD;
+                       rs->ctr_flags |= CTR_FLAG_REBUILD;
                } else if (!strcasecmp(key, "write_mostly")) {
                        if (rs->raid_type->level != 1) {
                                rs->ti->error = "write_mostly option is only valid for RAID1";
@@ -614,7 +617,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "max_write_behind option is only valid for RAID1";
                                return -EINVAL;
                        }
-                       rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+                       rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
 
                        /*
                         * In device-mapper, we specify things in sectors, but
@@ -627,14 +630,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        }
                        rs->md.bitmap_info.max_write_behind = value;
                } else if (!strcasecmp(key, "daemon_sleep")) {
-                       rs->print_flags |= DMPF_DAEMON_SLEEP;
+                       rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
                        if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
                                rs->ti->error = "daemon sleep period out of range";
                                return -EINVAL;
                        }
                        rs->md.bitmap_info.daemon_sleep = value;
                } else if (!strcasecmp(key, "stripe_cache")) {
-                       rs->print_flags |= DMPF_STRIPE_CACHE;
+                       rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
 
                        /*
                         * In device-mapper, we specify things in sectors, but
@@ -652,21 +655,21 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                return -EINVAL;
                        }
                } else if (!strcasecmp(key, "min_recovery_rate")) {
-                       rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+                       rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
                        if (value > INT_MAX) {
                                rs->ti->error = "min_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_min = (int)value;
                } else if (!strcasecmp(key, "max_recovery_rate")) {
-                       rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+                       rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
                        if (value > INT_MAX) {
                                rs->ti->error = "max_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_max = (int)value;
                } else if (!strcasecmp(key, "region_size")) {
-                       rs->print_flags |= DMPF_REGION_SIZE;
+                       rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
                        region_size = value;
                } else if (!strcasecmp(key, "raid10_copies") &&
                           (rs->raid_type->level == 10)) {
@@ -674,7 +677,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "Bad value for 'raid10_copies'";
                                return -EINVAL;
                        }
-                       rs->print_flags |= DMPF_RAID10_COPIES;
+                       rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
                        raid10_copies = value;
                } else {
                        DMERR("Unable to parse RAID parameter: %s", key);
@@ -716,7 +719,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                rs->md.layout = raid10_format_to_md_layout(raid10_format,
                                                           raid10_copies);
                rs->md.new_layout = rs->md.layout;
-       } else if ((rs->raid_type->level > 1) &&
+       } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
                   sector_div(sectors_per_dev,
                              (rs->md.raid_disks - rs->raid_type->parity_devs))) {
                rs->ti->error = "Target length not divisible by number of data devices";
@@ -742,13 +745,7 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 {
        struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-       if (rs->raid_type->level == 1)
-               return md_raid1_congested(&rs->md, bits);
-
-       if (rs->raid_type->level == 10)
-               return md_raid10_congested(&rs->md, bits);
-
-       return md_raid5_congested(&rs->md, bits);
+       return mddev_congested(&rs->md, bits);
 }
 
 /*
@@ -785,8 +782,7 @@ struct dm_raid_superblock {
        __le32 layout;
        __le32 stripe_sectors;
 
-       __u8 pad[452];          /* Round struct to 512 bytes. */
-                               /* Always set to 0 when writing. */
+       /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -823,7 +819,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);
 
-       memset(sb, 0, sizeof(*sb));
+       memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0);  /* No features yet */
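[sb + 1 points at the first byte past the struct's fixed fields, so the memset() now zero-fills only the tail of the logical block. This pairs with the pad[452] removal above and is safe because every field of *sb is assigned explicitly in this function. Schematically:

    |<---- sizeof(*sb) ---->|<-- rdev->sb_size - sizeof(*sb) -->|
    [  dm_raid_superblock   ][             zero fill            ]
     ^sb                     ^sb + 1
]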
@@ -858,7 +854,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        uint64_t events_sb, events_refsb;
 
        rdev->sb_start = 0;
-       rdev->sb_size = sizeof(*sb);
+       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+               DMERR("superblock size of a logical block is no longer valid");
+               return -EINVAL;
+       }
 
        ret = read_disk_sb(rdev, rdev->sb_size);
        if (ret)
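[bdev_logical_block_size() reports the metadata device's logical block size in bytes, so superblock reads and writes are now sized to a unit the device can actually accept, and the new bounds check rejects anything smaller than the struct or larger than a page:

    /* 512-byte device: sb_size = 512   (the pre-change behaviour) */
    /* 4Kn device:      sb_size = 4096  (I/O no longer sub-block)  */
]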
@@ -946,7 +946,7 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
        }
 
-       if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+       if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
                mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
 
        /*
@@ -1025,8 +1025,9 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
        return 0;
 }
 
-static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
+static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
 {
+       struct mddev *mddev = &rs->md;
        struct dm_raid_superblock *sb = page_address(rdev->sb_page);
 
        /*
@@ -1036,8 +1037,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
        if (!mddev->events && super_init_validation(mddev, rdev))
                return -EINVAL;
 
-       mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
-       rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+       /* Enable bitmap creation for RAID levels != 0 */
+       mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
+       rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
+
        if (!test_bit(FirstUse, &rdev->flags)) {
                rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
                if (rdev->recovery_offset != MaxSector)
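[to_sector() converts bytes to 512-byte sectors, so to_sector(4096) == 4096 >> 9 == 8: redundant levels keep the write-intent bitmap 4 KiB into the metadata area, exactly as the old hard-coded 4096 >> 9 did, while raid0, having nothing to resynchronize, gets offset 0 and thus no bitmap.]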
@@ -1072,7 +1075,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
        freshest = NULL;
        rdev_for_each_safe(rdev, tmp, mddev) {
                /*
-                * Skipping super_load due to DMPF_SYNC will cause
+                * Skipping super_load due to CTR_FLAG_SYNC will cause
                 * the array to undergo initialization again as
                 * though it were new.  This is the intended effect
                 * of the "sync" directive.
@@ -1081,7 +1084,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                 * that the "sync" directive is disallowed during the
                 * reshape.
                 */
-               if (rs->print_flags & DMPF_SYNC)
+               rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));
+
+               if (rs->ctr_flags & CTR_FLAG_SYNC)
                        continue;
 
                if (!rdev->meta_bdev)
@@ -1139,16 +1144,63 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
         * validation for the remaining devices.
         */
        ti->error = "Unable to assemble array: Invalid superblocks";
-       if (super_validate(mddev, freshest))
+       if (super_validate(rs, freshest))
                return -EINVAL;
 
        rdev_for_each(rdev, mddev)
-               if ((rdev != freshest) && super_validate(mddev, rdev))
+               if ((rdev != freshest) && super_validate(rs, rdev))
                        return -EINVAL;
 
        return 0;
 }
 
+/*
+ * Enable/disable discard support on RAID set depending on
+ * RAID level and discard properties of underlying RAID members.
+ */
+static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
+{
+       int i;
+       bool raid456;
+
+       /* Assume discards not supported until after checks below. */
+       ti->discards_supported = false;
+
+       /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
+       raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
+
+       for (i = 0; i < rs->md.raid_disks; i++) {
+               struct request_queue *q;
+
+               if (!rs->dev[i].rdev.bdev)
+                       continue;
+
+               q = bdev_get_queue(rs->dev[i].rdev.bdev);
+               if (!q || !blk_queue_discard(q))
+                       return;
+
+               if (raid456) {
+                       if (!q->limits.discard_zeroes_data)
+                               return;
+                       if (!devices_handle_discard_safely) {
+                               DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
+                               DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
+                               return;
+                       }
+               }
+       }
+
+       /* All RAID members properly support discards */
+       ti->discards_supported = true;
+
+       /*
+        * RAID1 and RAID10 personalities require bio splitting,
+        * RAID0/4/5/6 don't and process large discard bios properly.
+        */
+       ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
+       ti->num_discard_bios = 1;
+}
+
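[Condensed, the gating above enables discard only when every member advertises it, with an extra requirement for the parity levels (an illustrative restatement, not part of the patch; the predicate names are made up):

    bool enable = all_members_support_discard &&
                  (!raid456 || (all_members_discard_zeroes_data &&
                                devices_handle_discard_safely));

The raid4/5/6 caution exists because parity reconstruction assumes discarded regions read back as zeroes; a device returning stale data there would silently corrupt parity.]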
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -1181,7 +1233,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        argv++;
 
        /* number of RAID parameters */
-       if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
+       if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
                ti->error = "Cannot understand number of RAID parameters";
                return -EINVAL;
        }
@@ -1189,17 +1241,23 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        argv++;
 
        /* Skip over RAID params for now and find out # of devices */
-       if (num_raid_params + 1 > argc) {
+       if (num_raid_params >= argc) {
                ti->error = "Arguments do not agree with counts given";
                return -EINVAL;
        }
 
-       if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
-           (num_raid_devs >= INT_MAX)) {
+       if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
+           (num_raid_devs > MAX_RAID_DEVICES)) {
                ti->error = "Cannot understand number of raid devices";
                return -EINVAL;
        }
 
+       argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+       if (argc != (num_raid_devs * 2)) {
+               ti->error = "Supplied RAID devices do not match the count given";
+               return -EINVAL;
+       }
+
        rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
        if (IS_ERR(rs))
                return PTR_ERR(rs);
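[Hoisting the device-count check ahead of context_alloc() (see the hunk below) rejects a malformed table before anything is allocated. The accounting, worked through on a hypothetical table:

    raid4 1 2048 5 - 8:17 - 8:18 - 8:19 - 8:33 - 8:34

    after consuming "raid4" and "1":   num_raid_params == 1, argc == 12
    argv[num_raid_params] == "5":      num_raid_devs == 5
    argc -= num_raid_params + 1:       argc == 10 == num_raid_devs * 2
]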
@@ -1208,16 +1266,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (ret)
                goto bad;
 
-       ret = -EINVAL;
-
-       argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
        argv += num_raid_params + 1;
 
-       if (argc != (num_raid_devs * 2)) {
-               ti->error = "Supplied RAID devices does not match the count given";
-               goto bad;
-       }
-
        ret = dev_parms(rs, argv);
        if (ret)
                goto bad;
@@ -1231,10 +1281,16 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->private = rs;
        ti->num_flush_bios = 1;
 
-       mutex_lock(&rs->md.reconfig_mutex);
+       /*
+        * Disable/enable discard support on RAID set.
+        */
+       configure_discard_support(ti, rs);
+
+       /* Has to be held on running the array */
+       mddev_lock_nointr(&rs->md);
        ret = md_run(&rs->md);
        rs->md.in_sync = 0; /* Assume already marked dirty */
-       mutex_unlock(&rs->md.reconfig_mutex);
+       mddev_unlock(&rs->md);
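[mddev_lock_nointr() takes mddev->reconfig_mutex uninterruptibly (it is essentially mutex_lock(&mddev->reconfig_mutex)), so the pairing with mddev_unlock() keeps dm-raid behind md's locking API instead of poking the mutex directly; mddev_unlock() also performs md's wake-ups on release.]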
 
        if (ret) {
                ti->error = "Fail to run raid array";
@@ -1317,34 +1373,40 @@ static void raid_status(struct dm_target *ti, status_type_t type,
        case STATUSTYPE_INFO:
                DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
 
-               if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
-                       sync = rs->md.curr_resync_completed;
-               else
-                       sync = rs->md.recovery_cp;
-
-               if (sync >= rs->md.resync_max_sectors) {
-                       /*
-                        * Sync complete.
-                        */
+               if (rs->raid_type->level) {
+                       if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
+                               sync = rs->md.curr_resync_completed;
+                       else
+                               sync = rs->md.recovery_cp;
+
+                       if (sync >= rs->md.resync_max_sectors) {
+                               /*
+                                * Sync complete.
+                                */
+                               array_in_sync = 1;
+                               sync = rs->md.resync_max_sectors;
+                       } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
+                               /*
+                                * If "check" or "repair" is occurring, the array has
+                                * undergone an initial sync and the health characters
+                                * should not be 'a' anymore.
+                                */
+                               array_in_sync = 1;
+                       } else {
+                               /*
+                                * The array may be doing an initial sync, or it may
+                                * be rebuilding individual components.  If all the
+                                * devices are In_sync, then it is the array that is
+                                * being initialized.
+                                */
+                               for (i = 0; i < rs->md.raid_disks; i++)
+                                       if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+                                               array_in_sync = 1;
+                       }
+               } else {
+                       /* RAID0 */
                        array_in_sync = 1;
                        sync = rs->md.resync_max_sectors;
-               } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
-                       /*
-                        * If "check" or "repair" is occurring, the array has
-                        * undergone and initial sync and the health characters
-                        * should not be 'a' anymore.
-                        */
-                       array_in_sync = 1;
-               } else {
-                       /*
-                        * The array may be doing an initial sync, or it may
-                        * be rebuilding individual components.  If all the
-                        * devices are In_sync, then it is the array that is
-                        * being initialized.
-                        */
-                       for (i = 0; i < rs->md.raid_disks; i++)
-                               if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
-                                       array_in_sync = 1;
                }
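[For level 0 the status logic short-circuits: a raid0 set is by definition fully in sync, so the resync figures are pinned at the maximum. A hypothetical dmsetup status line for a two-device raid0 set (values illustrative: type, device count, health characters, resync fraction, then the fields emitted further below):

    raid0 2 AA 2097152/2097152 ...
]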
 
                /*
@@ -1388,13 +1450,14 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                 *   performing a "check" of the array.
                 */
                DMEMIT(" %llu",
+                      (strcmp(rs->md.last_sync_action, "check")) ? 0 :
                       (unsigned long long)
                       atomic64_read(&rs->md.resync_mismatches));
                break;
        case STATUSTYPE_TABLE:
                /* The string you would use to construct this array */
                for (i = 0; i < rs->md.raid_disks; i++) {
-                       if ((rs->print_flags & DMPF_REBUILD) &&
+                       if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
                            rs->dev[i].data_dev &&
                            !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                raid_param_cnt += 2; /* for rebuilds */
@@ -1403,33 +1466,33 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                                raid_param_cnt += 2;
                }
 
-               raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
-               if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+               raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
+               if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
                        raid_param_cnt--;
 
                DMEMIT("%s %u %u", rs->raid_type->name,
                       raid_param_cnt, rs->md.chunk_sectors);
 
-               if ((rs->print_flags & DMPF_SYNC) &&
+               if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
                    (rs->md.recovery_cp == MaxSector))
                        DMEMIT(" sync");
-               if (rs->print_flags & DMPF_NOSYNC)
+               if (rs->ctr_flags & CTR_FLAG_NOSYNC)
                        DMEMIT(" nosync");
 
                for (i = 0; i < rs->md.raid_disks; i++)
-                       if ((rs->print_flags & DMPF_REBUILD) &&
+                       if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
                            rs->dev[i].data_dev &&
                            !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                DMEMIT(" rebuild %u", i);
 
-               if (rs->print_flags & DMPF_DAEMON_SLEEP)
+               if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
                        DMEMIT(" daemon_sleep %lu",
                               rs->md.bitmap_info.daemon_sleep);
 
-               if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+               if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
                        DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
 
-               if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+               if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
                        DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
 
                for (i = 0; i < rs->md.raid_disks; i++)
@@ -1437,11 +1500,11 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                            test_bit(WriteMostly, &rs->dev[i].rdev.flags))
                                DMEMIT(" write_mostly %u", i);
 
-               if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+               if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
                        DMEMIT(" max_write_behind %lu",
                               rs->md.bitmap_info.max_write_behind);
 
-               if (rs->print_flags & DMPF_STRIPE_CACHE) {
+               if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
                        struct r5conf *conf = rs->md.private;
 
                        /* convert from kiB to sectors */
@@ -1449,15 +1512,15 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                               conf ? conf->max_nr_stripes * 2 : 0);
                }
 
-               if (rs->print_flags & DMPF_REGION_SIZE)
+               if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
                        DMEMIT(" region_size %lu",
                               rs->md.bitmap_info.chunksize >> 9);
 
-               if (rs->print_flags & DMPF_RAID10_COPIES)
+               if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
                        DMEMIT(" raid10_copies %u",
                               raid10_md_layout_to_copies(rs->md.layout));
 
-               if (rs->print_flags & DMPF_RAID10_FORMAT)
+               if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
                        DMEMIT(" raid10_format %s",
                               raid10_md_layout_to_format(rs->md.layout));
 
@@ -1572,23 +1635,90 @@ static void raid_postsuspend(struct dm_target *ti)
        mddev_suspend(&rs->md);
 }
 
+static void attempt_restore_of_faulty_devices(struct raid_set *rs)
+{
+       int i;
+       uint64_t failed_devices, cleared_failed_devices = 0;
+       unsigned long flags;
+       struct dm_raid_superblock *sb;
+       struct md_rdev *r;
+
+       for (i = 0; i < rs->md.raid_disks; i++) {
+               r = &rs->dev[i].rdev;
+               if (test_bit(Faulty, &r->flags) && r->sb_page &&
+                   sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+                       DMINFO("Faulty %s device #%d has readable super block."
+                              "  Attempting to revive it.",
+                              rs->raid_type->name, i);
+
+                       /*
+                        * Faulty bit may be set, but sometimes the array can
+                        * be suspended before the personalities can respond
+                        * by removing the device from the array (i.e. calling
+                        * 'hot_remove_disk').  If they haven't yet removed
+                        * the failed device, its 'raid_disk' number will be
+                        * '>= 0' - meaning we must call this function
+                        * ourselves.
+                        */
+                       if ((r->raid_disk >= 0) &&
+                           (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+                               /* Failed to revive this device, try next */
+                               continue;
+
+                       r->raid_disk = i;
+                       r->saved_raid_disk = i;
+                       flags = r->flags;
+                       clear_bit(Faulty, &r->flags);
+                       clear_bit(WriteErrorSeen, &r->flags);
+                       clear_bit(In_sync, &r->flags);
+                       if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+                               r->raid_disk = -1;
+                               r->saved_raid_disk = -1;
+                               r->flags = flags;
+                       } else {
+                               r->recovery_offset = 0;
+                               cleared_failed_devices |= 1ULL << i;
+                       }
+               }
+       }
+       if (cleared_failed_devices) {
+               rdev_for_each(r, &rs->md) {
+                       sb = page_address(r->sb_page);
+                       failed_devices = le64_to_cpu(sb->failed_devices);
+                       failed_devices &= ~cleared_failed_devices;
+                       sb->failed_devices = cpu_to_le64(failed_devices);
+               }
+       }
+}
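[The revival path temporarily removes the rdev (hot_remove_disk), clears its Faulty/WriteErrorSeen/In_sync bits, and re-adds it with recovery_offset 0 so the slot is rebuilt from scratch; on success the matching bit is also cleared from every superblock's failed_devices mask so the device is not re-failed on the next superblock load.]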
+
 static void raid_resume(struct dm_target *ti)
 {
        struct raid_set *rs = ti->private;
 
-       set_bit(MD_CHANGE_DEVS, &rs->md.flags);
-       if (!rs->bitmap_loaded) {
-               bitmap_load(&rs->md);
-               rs->bitmap_loaded = 1;
+       if (rs->raid_type->level) {
+               set_bit(MD_CHANGE_DEVS, &rs->md.flags);
+
+               if (!rs->bitmap_loaded) {
+                       bitmap_load(&rs->md);
+                       rs->bitmap_loaded = 1;
+               } else {
+                       /*
+                        * A secondary resume while the device is active.
+                        * Take this opportunity to check whether any failed
+                        * devices are reachable again.
+                        */
+                       attempt_restore_of_faulty_devices(rs);
+               }
+
+               clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
        }
 
-       clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
        mddev_resume(&rs->md);
 }
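[Condensed resume flow after this change:

    level != 0, first resume      : bitmap_load()
    level != 0, subsequent resumes: attempt_restore_of_faulty_devices()
    level != 0                    : clear MD_RECOVERY_FROZEN
    any level                     : mddev_resume()

raid0 keeps no bitmap and never recovers, so only the final mddev_resume() applies to it.]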
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 5, 0},
+       .version = {1, 7, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
@@ -1619,6 +1749,10 @@ static void __exit dm_raid_exit(void)
 module_init(dm_raid_init);
 module_exit(dm_raid_exit);
 
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+                "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
+
 MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
 MODULE_ALIAS("dm-raid1");
 MODULE_ALIAS("dm-raid10");