/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
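/*
 * Illustrative arithmetic (not from the driver): with the default of 20,
 * an rdev that had accumulated 16 corrected errors and then ran error-free
 * for three hours would be treated as having 16 >> 3 == 2, so occasional,
 * well-spaced errors never accumulate towards the ejection limit.
 */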
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
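/*
 * Illustrative shell usage of the knobs documented above: raise the
 * guaranteed resync speed for one array, or for every array:
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *
 * A per-array value of 0 means "use the system-wide default", which is
 * exactly what speed_min() and speed_max() above implement.
 */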
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
};

static ctl_table raid_dir_table[] = {
	{
		.mode		= S_IRUGO|S_IXUGO,
	},
};

static ctl_table raid_root_table[] = {
	{
		.child		= raid_dir_table,
	},
};

static const struct block_device_operations md_fops;
static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
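/*
 * Illustrative user-space consumer (not part of this file): md_new_event
 * is what makes a poll on /proc/mdstat wake up when the array changes.
 *
 *	struct pollfd pfd = { .fd = open("/proc/mdstat", O_RDONLY),
 *			      .events = POLLIN | POLLPRI };
 *	poll(&pfd, 1, -1);	   (returns on the next md event)
 */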
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
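/*
 * Typical use (cf. md_print_devices() later in this file):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * Breaking out of the loop early leaves a reference held, which the
 * caller must then drop with mddev_put().
 */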
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	if (mddev->suspended || mddev->barrier) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended && !mddev->barrier)
				break;
			schedule();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);

	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
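/*
 * Callers pair these around a reconfiguration, e.g. (illustrative):
 *
 *	mddev_suspend(mddev);
 *	... swap in a new personality or layout ...
 *	mddev_resume(mddev);
 */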
int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic barrier handling for md
 */

#define POST_REQUEST_BARRIER ((void*)1)

static void md_end_barrier(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		if (mddev->barrier == POST_REQUEST_BARRIER) {
			/* This was a post-request barrier */
			mddev->barrier = NULL;
			wake_up(&mddev->sb_wait);
		} else
			/* The pre-request barrier has finished */
			schedule_work(&mddev->barrier_work);
	}
	bio_put(bio);
}

static void submit_barriers(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc(GFP_KERNEL, 0);
			bi->bi_end_io = md_end_barrier;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_BARRIER, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}

static void md_submit_barrier(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
	struct bio *bio = mddev->barrier;

	atomic_set(&mddev->flush_pending, 1);

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		bio_endio(bio, -EOPNOTSUPP);
	else if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
		mddev->barrier = POST_REQUEST_BARRIER;
		submit_barriers(mddev);
	}
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->barrier = NULL;
		wake_up(&mddev->sb_wait);
	}
}

void md_barrier_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->barrier,
			    mddev->write_lock, /*nothing*/);
	mddev->barrier = bio;
	spin_unlock_irq(&mddev->write_lock);

	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->barrier_work, md_submit_barrier);

	submit_barriers(mddev);

	if (atomic_dec_and_test(&mddev->flush_pending))
		schedule_work(&mddev->barrier_work);
}
EXPORT_SYMBOL(md_barrier_request);
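/*
 * Illustrative caller: a personality that cannot order barriers itself
 * hands them over from its make_request method (raid0 and linear follow
 * this pattern; example_make_request is a made-up name):
 *
 *	static int example_make_request(mddev_t *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 *			md_barrier_request(mddev, bio);
 *			return 0;
 *		}
 *		...
 *	}
 */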
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
	spin_unlock(&all_mddevs_lock);
static void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;
static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold open_mutex instead - we are allowed to take
		 * it while holding reconfig_mutex, and md_run can
		 * use it to wait for the remove to complete.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mutex_lock(&mddev->open_mutex);
		mutex_unlock(&mddev->reconfig_mutex);

		if (to_remove != &md_redundancy_group)
			sysfs_remove_group(&mddev->kobj, to_remove);
		if (mddev->pers == NULL ||
		    mddev->pers->sync_request == NULL) {
			sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
			if (mddev->sysfs_action)
				sysfs_put(mddev->sysfs_action);
			mddev->sysfs_action = NULL;
		}
		mutex_unlock(&mddev->open_mutex);
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
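/*
 * Worked example (illustrative): MD_NEW_SIZE_SECTORS in md_p.h rounds the
 * device size down to a 64KB (128-sector) boundary and steps back one
 * 64KB reserved area, so a 1000000-sector device gives
 * (1000000 & ~127) - 128 == 999808: the 0.90 superblock occupies the
 * last aligned 64KB of the device.
 */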
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_page = NULL;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
	} else {
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes EOPNOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
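/*
 * Typical usage (as md_update_sb() does below): queue the write, then
 * wait for every scheduled superblock write to complete:
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(mddev);
 */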
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		 struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
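/*
 * Example (illustrative): md_csum_fold(0xffff0001) first computes
 * 0x0001 + 0xffff == 0x10000; the second round folds the carry back in,
 * 0x0000 + 0x0001 == 0x0001. Two rounds suffice because the first sum
 * always fits in 17 bits.
 */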
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in. Subsequent calls check that dev
 *      is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 */

struct super_type {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
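/*
 * Illustrative call site: a personality without bitmap support starts
 * its run() method with
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 *
 * (raid0 and linear do exactly this.)
 */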
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
		       b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
		       b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position. We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;

	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}

	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
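	 *
	 * Worked example for minor_version 0 (illustrative): on a
	 * 1000000-sector device the code below steps back 8K (16 sectors)
	 * from the end and rounds down to an 8-sector boundary, giving
	 * (1000000 - 16) & ~7 == 999984 as the superblock start.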
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev,b),
			       bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			return -EINVAL;
		}
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}
static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (reference == NULL) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
	       mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
		"md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
		"md: Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid,
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
		"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
		"md: Dev:%08x UUID: %pU\n"
		"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
		"md: (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid,
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * extra updates.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, an 'even' event must also go
			 * to spares. */
			if ((mddev->events&1)==0) {
				nospares = 0;
				sync_req = 2; /* force a second update to get the
					       * even/odd in sync */
			}
		} else {
			/* otherwise an 'odd' event must go to spares */
			if ((mddev->events&1)) {
				nospares = 0;
				sync_req = 2; /* force a second update to get the
					       * even/odd in sync */
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str. They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (!*cmd && !*str)
		return 1;
	return 0;
}
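/*
 * So, for example (illustrative), both of these match:
 *
 *	cmd_match("faulty\n", "faulty")	-> 1
 *	cmd_match("faulty", "faulty")	-> 1
 *
 * while a mere prefix such as "fault" does not.
 */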
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}

static ssize_t
2297 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2300 * faulty - simulates an error
2301 * remove - disconnects the device
2302 * writemostly - sets write_mostly
2303 * -writemostly - clears write_mostly
2304 * blocked - sets the Blocked flag
2305 * -blocked - clears the Blocked flag
2306 * insync - sets In_sync provided the device isn't active
2309 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2310 md_error(rdev->mddev, rdev);
2312 } else if (cmd_match(buf, "remove")) {
2313 if (rdev->raid_disk >= 0)
2316 mddev_t *mddev = rdev->mddev;
2317 kick_rdev_from_array(rdev);
2319 md_update_sb(mddev, 1);
2320 md_new_event(mddev);
2323 } else if (cmd_match(buf, "writemostly")) {
2324 set_bit(WriteMostly, &rdev->flags);
2326 } else if (cmd_match(buf, "-writemostly")) {
2327 clear_bit(WriteMostly, &rdev->flags);
2329 } else if (cmd_match(buf, "blocked")) {
2330 set_bit(Blocked, &rdev->flags);
2332 } else if (cmd_match(buf, "-blocked")) {
2333 clear_bit(Blocked, &rdev->flags);
2334 wake_up(&rdev->blocked_wait);
2335 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2336 md_wakeup_thread(rdev->mddev->thread);
2339 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2340 set_bit(In_sync, &rdev->flags);
2343 if (!err && rdev->sysfs_state)
2344 sysfs_notify_dirent(rdev->sysfs_state);
2345 return err ? err : len;
2347 static struct rdev_sysfs_entry rdev_state =
2348 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
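/* A minimal userspace sketch (assumptions: an array md0 with member
 * sda1; the per-rdev sysfs directory name "dev-sda1" is illustrative)
 * of driving state_store() above:
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/dev-sda1/state", "w");
	if (!f)
		return 1;
	/* a trailing newline is fine -- see cmd_match() */
	fputs("writemostly\n", f);
	fclose(f);
	return 0;
}
#endif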
2351 errors_show(mdk_rdev_t *rdev, char *page)
2353 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2357 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2360 unsigned long n = simple_strtoul(buf, &e, 10);
2361 if (*buf && (*e == 0 || *e == '\n')) {
2362 atomic_set(&rdev->corrected_errors, n);
2367 static struct rdev_sysfs_entry rdev_errors =
2368 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2371 slot_show(mdk_rdev_t *rdev, char *page)
2373 if (rdev->raid_disk < 0)
2374 return sprintf(page, "none\n");
2376 return sprintf(page, "%d\n", rdev->raid_disk);
2380 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2385 int slot = simple_strtoul(buf, &e, 10);
2386 if (strncmp(buf, "none", 4)==0)
2388 else if (e==buf || (*e && *e!= '\n'))
2390 if (rdev->mddev->pers && slot == -1) {
2391 /* Setting 'slot' on an active array requires also
2392 * updating the 'rd%d' link, and communicating
2393 * with the personality with ->hot_*_disk.
2394 * For now we only support removing
2395 * failed/spare devices. This normally happens automatically,
2396 * but not when the metadata is externally managed.
2398 if (rdev->raid_disk == -1)
2400 /* personality does all needed checks */
2401 if (rdev->mddev->pers->hot_add_disk == NULL)
2403 err = rdev->mddev->pers->
2404 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2407 sprintf(nm, "rd%d", rdev->raid_disk);
2408 sysfs_remove_link(&rdev->mddev->kobj, nm);
2409 rdev->raid_disk = -1;
2410 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2411 md_wakeup_thread(rdev->mddev->thread);
2412 } else if (rdev->mddev->pers) {
2414 /* Activating a spare .. or possibly reactivating
2415 * if we ever get bitmaps working here.
2418 if (rdev->raid_disk != -1)
2421 if (rdev->mddev->pers->hot_add_disk == NULL)
2424 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2425 if (rdev2->raid_disk == slot)
2428 rdev->raid_disk = slot;
2429 if (test_bit(In_sync, &rdev->flags))
2430 rdev->saved_raid_disk = slot;
2432 rdev->saved_raid_disk = -1;
2433 err = rdev->mddev->pers->
2434 hot_add_disk(rdev->mddev, rdev);
2436 rdev->raid_disk = -1;
2439 sysfs_notify_dirent(rdev->sysfs_state);
2440 sprintf(nm, "rd%d", rdev->raid_disk);
2441 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2443 "md: cannot register "
2445 nm, mdname(rdev->mddev));
2447 /* don't wakeup anyone, leave that to userspace. */
2449 if (slot >= rdev->mddev->raid_disks)
2451 rdev->raid_disk = slot;
2452 /* assume it is working */
2453 clear_bit(Faulty, &rdev->flags);
2454 clear_bit(WriteMostly, &rdev->flags);
2455 set_bit(In_sync, &rdev->flags);
2456 sysfs_notify_dirent(rdev->sysfs_state);
2462 static struct rdev_sysfs_entry rdev_slot =
2463 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2466 offset_show(mdk_rdev_t *rdev, char *page)
2468 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2472 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2475 unsigned long long offset = simple_strtoull(buf, &e, 10);
2476 if (e==buf || (*e && *e != '\n'))
2478 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2480 if (rdev->sectors && rdev->mddev->external)
2481 /* Must set offset before size, so overlap checks
2482 * can be sane */
2483 return -EBUSY;
2484 rdev->data_offset = offset;
2488 static struct rdev_sysfs_entry rdev_offset =
2489 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2492 rdev_size_show(mdk_rdev_t *rdev, char *page)
2494 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2497 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2498 {
2499 /* check if two start/length pairs overlap */
2500 if (s1+l1 <= s2)
2501 return 0;
2502 if (s2+l2 <= s1)
2503 return 0;
2504 return 1;
2505 }
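/* e.g. (illustrative): overlaps(0, 100, 50, 10) -> 1, but two ranges
 * that merely touch, overlaps(0, 50, 50, 10) -> 0, since the checks
 * treat [start, start+len) as half-open intervals.
 */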
2507 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2508 {
2509 unsigned long long blocks;
2510 sector_t new;
2512 if (strict_strtoull(buf, 10, &blocks) < 0)
2513 return -EINVAL;
2515 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2516 return -EINVAL; /* sector conversion overflow */
2518 new = blocks * 2;
2519 if (new != blocks * 2)
2520 return -EINVAL; /* unsigned long long to sector_t overflow */
2522 *sectors = new;
2523 return 0;
2524 }
2527 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2529 mddev_t *my_mddev = rdev->mddev;
2530 sector_t oldsectors = rdev->sectors;
2533 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2535 if (my_mddev->pers && rdev->raid_disk >= 0) {
2536 if (my_mddev->persistent) {
2537 sectors = super_types[my_mddev->major_version].
2538 rdev_size_change(rdev, sectors);
2541 } else if (!sectors)
2542 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2543 rdev->data_offset;
2544 }
2545 if (sectors < my_mddev->dev_sectors)
2546 return -EINVAL; /* component must fit device */
2548 rdev->sectors = sectors;
2549 if (sectors > oldsectors && my_mddev->external) {
2550 /* need to check that all other rdevs with the same ->bdev
2551 * do not overlap. We need to unlock the mddev to avoid
2552 * a deadlock. We have already changed rdev->sectors, and if
2553 * we have to change it back, we will have the lock again.
2557 struct list_head *tmp;
2559 mddev_unlock(my_mddev);
2560 for_each_mddev(mddev, tmp) {
2564 list_for_each_entry(rdev2, &mddev->disks, same_set)
2565 if (test_bit(AllReserved, &rdev2->flags) ||
2566 (rdev->bdev == rdev2->bdev &&
2568 overlaps(rdev->data_offset, rdev->sectors,
2574 mddev_unlock(mddev);
2580 mddev_lock(my_mddev);
2582 /* Someone else could have slipped in a size
2583 * change here, but doing so is just silly.
2584 * We put oldsectors back because we *know* it is
2585 * safe, and trust userspace not to race with
2586 * itself.
2587 */
2588 rdev->sectors = oldsectors;
2595 static struct rdev_sysfs_entry rdev_size =
2596 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2599 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2601 unsigned long long recovery_start = rdev->recovery_offset;
2603 if (test_bit(In_sync, &rdev->flags) ||
2604 recovery_start == MaxSector)
2605 return sprintf(page, "none\n");
2607 return sprintf(page, "%llu\n", recovery_start);
2610 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2612 unsigned long long recovery_start;
2614 if (cmd_match(buf, "none"))
2615 recovery_start = MaxSector;
2616 else if (strict_strtoull(buf, 10, &recovery_start))
2619 if (rdev->mddev->pers &&
2620 rdev->raid_disk >= 0)
2623 rdev->recovery_offset = recovery_start;
2624 if (recovery_start == MaxSector)
2625 set_bit(In_sync, &rdev->flags);
2627 clear_bit(In_sync, &rdev->flags);
2631 static struct rdev_sysfs_entry rdev_recovery_start =
2632 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2634 static struct attribute *rdev_default_attrs[] = {
2640 &rdev_recovery_start.attr,
2644 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2646 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2647 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2648 mddev_t *mddev = rdev->mddev;
2654 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2656 if (rdev->mddev == NULL)
2659 rv = entry->show(rdev, page);
2660 mddev_unlock(mddev);
2666 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2667 const char *page, size_t length)
2669 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2670 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2672 mddev_t *mddev = rdev->mddev;
2676 if (!capable(CAP_SYS_ADMIN))
2678 rv = mddev ? mddev_lock(mddev): -EBUSY;
2680 if (rdev->mddev == NULL)
2683 rv = entry->store(rdev, page, length);
2684 mddev_unlock(mddev);
2689 static void rdev_free(struct kobject *ko)
2691 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2694 static struct sysfs_ops rdev_sysfs_ops = {
2695 .show = rdev_attr_show,
2696 .store = rdev_attr_store,
2698 static struct kobj_type rdev_ktype = {
2699 .release = rdev_free,
2700 .sysfs_ops = &rdev_sysfs_ops,
2701 .default_attrs = rdev_default_attrs,
2705 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2707 * mark the device faulty if:
2709 * - the device is nonexistent (zero size)
2710 * - the device has no valid superblock
2712 * a faulty rdev _never_ has rdev->sb set.
2714 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2716 char b[BDEVNAME_SIZE];
2721 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2723 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2724 return ERR_PTR(-ENOMEM);
2727 if ((err = alloc_disk_sb(rdev)))
2730 err = lock_rdev(rdev, newdev, super_format == -2);
2734 kobject_init(&rdev->kobj, &rdev_ktype);
2737 rdev->saved_raid_disk = -1;
2738 rdev->raid_disk = -1;
2740 rdev->data_offset = 0;
2741 rdev->sb_events = 0;
2742 rdev->last_read_error.tv_sec = 0;
2743 rdev->last_read_error.tv_nsec = 0;
2744 atomic_set(&rdev->nr_pending, 0);
2745 atomic_set(&rdev->read_errors, 0);
2746 atomic_set(&rdev->corrected_errors, 0);
2748 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2751 "md: %s has zero or unknown size, marking faulty!\n",
2752 bdevname(rdev->bdev,b));
2757 if (super_format >= 0) {
2758 err = super_types[super_format].
2759 load_super(rdev, NULL, super_minor);
2760 if (err == -EINVAL) {
2762 "md: %s does not have a valid v%d.%d "
2763 "superblock, not importing!\n",
2764 bdevname(rdev->bdev,b),
2765 super_format, super_minor);
2770 "md: could not read %s's sb, not importing!\n",
2771 bdevname(rdev->bdev,b));
2776 INIT_LIST_HEAD(&rdev->same_set);
2777 init_waitqueue_head(&rdev->blocked_wait);
2782 if (rdev->sb_page) {
2788 return ERR_PTR(err);
2792 * Check a full RAID array for plausibility
2796 static void analyze_sbs(mddev_t * mddev)
2799 mdk_rdev_t *rdev, *freshest, *tmp;
2800 char b[BDEVNAME_SIZE];
2803 rdev_for_each(rdev, tmp, mddev)
2804 switch (super_types[mddev->major_version].
2805 load_super(rdev, freshest, mddev->minor_version)) {
2813 "md: fatal superblock inconsistency in %s"
2814 " -- removing from array\n",
2815 bdevname(rdev->bdev,b));
2816 kick_rdev_from_array(rdev);
2820 super_types[mddev->major_version].
2821 validate_super(mddev, freshest);
2824 rdev_for_each(rdev, tmp, mddev) {
2825 if (mddev->max_disks &&
2826 (rdev->desc_nr >= mddev->max_disks ||
2827 i > mddev->max_disks)) {
2829 "md: %s: %s: only %d devices permitted\n",
2830 mdname(mddev), bdevname(rdev->bdev, b),
2832 kick_rdev_from_array(rdev);
2835 if (rdev != freshest)
2836 if (super_types[mddev->major_version].
2837 validate_super(mddev, rdev)) {
2838 printk(KERN_WARNING "md: kicking non-fresh %s"
2840 bdevname(rdev->bdev,b));
2841 kick_rdev_from_array(rdev);
2844 if (mddev->level == LEVEL_MULTIPATH) {
2845 rdev->desc_nr = i++;
2846 rdev->raid_disk = rdev->desc_nr;
2847 set_bit(In_sync, &rdev->flags);
2848 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2849 rdev->raid_disk = -1;
2850 clear_bit(In_sync, &rdev->flags);
2855 /* Read a fixed-point number.
2856 * Numbers in sysfs attributes should be in "standard" units where
2857 * possible, so time should be in seconds.
2858 * However we internally use a much smaller unit such as
2859 * milliseconds or jiffies.
2860 * This function takes a decimal number with a possible fractional
2861 * component, and produces an integer which is the result of
2862 * multiplying that number by 10^'scale',
2863 * all without any floating-point arithmetic.
2865 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2867 unsigned long result = 0;
2869 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2872 else if (decimals < scale) {
2875 result = result * 10 + value;
2887 while (decimals < scale) {
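/* Worked example (illustrative): strict_strtoul_scaled("0.2", &v, 3)
 * consumes '0', then '.', then '2', leaving result == 2 with
 * decimals == 1; the trailing loop then multiplies by 10 until
 * 'decimals' reaches 'scale', so v ends up as 200 (0.2 expressed
 * in thousandths).
 */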
2896 static void md_safemode_timeout(unsigned long data);
2899 safe_delay_show(mddev_t *mddev, char *page)
2901 int msec = (mddev->safemode_delay*1000)/HZ;
2902 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2905 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2909 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2912 mddev->safemode_delay = 0;
2914 unsigned long old_delay = mddev->safemode_delay;
2915 mddev->safemode_delay = (msec*HZ)/1000;
2916 if (mddev->safemode_delay == 0)
2917 mddev->safemode_delay = 1;
2918 if (mddev->safemode_delay < old_delay)
2919 md_safemode_timeout((unsigned long)mddev);
2923 static struct md_sysfs_entry md_safe_delay =
2924 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
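/* Illustrative: writing "0.200" to safe_mode_delay parses (scale 3)
 * to msec == 200, which safe_delay_store() converts to (200*HZ)/1000
 * jiffies, rounding any nonzero delay up to at least 1 jiffy.
 */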
2927 level_show(mddev_t *mddev, char *page)
2929 struct mdk_personality *p = mddev->pers;
2931 return sprintf(page, "%s\n", p->name);
2932 else if (mddev->clevel[0])
2933 return sprintf(page, "%s\n", mddev->clevel);
2934 else if (mddev->level != LEVEL_NONE)
2935 return sprintf(page, "%d\n", mddev->level);
2941 level_store(mddev_t *mddev, const char *buf, size_t len)
2945 struct mdk_personality *pers;
2950 if (mddev->pers == NULL) {
2953 if (len >= sizeof(mddev->clevel))
2955 strncpy(mddev->clevel, buf, len);
2956 if (mddev->clevel[len-1] == '\n')
2958 mddev->clevel[len] = 0;
2959 mddev->level = LEVEL_NONE;
2963 /* request to change the personality. Need to ensure:
2964 * - array is not engaged in resync/recovery/reshape
2965 * - old personality can be suspended
2966 * - new personality will take over the same array.
2969 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2972 if (!mddev->pers->quiesce) {
2973 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2974 mdname(mddev), mddev->pers->name);
2978 /* Now find the new personality */
2979 if (len == 0 || len >= sizeof(clevel))
2981 strncpy(clevel, buf, len);
2982 if (clevel[len-1] == '\n')
2985 if (strict_strtol(clevel, 10, &level))
2988 if (request_module("md-%s", clevel) != 0)
2989 request_module("md-level-%s", clevel);
2990 spin_lock(&pers_lock);
2991 pers = find_pers(level, clevel);
2992 if (!pers || !try_module_get(pers->owner)) {
2993 spin_unlock(&pers_lock);
2994 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
2997 spin_unlock(&pers_lock);
2999 if (pers == mddev->pers) {
3000 /* Nothing to do! */
3001 module_put(pers->owner);
3004 if (!pers->takeover) {
3005 module_put(pers->owner);
3006 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3007 mdname(mddev), clevel);
3011 /* ->takeover must set new_* and/or delta_disks
3012 * if it succeeds, and may set them when it fails.
3014 priv = pers->takeover(mddev);
3016 mddev->new_level = mddev->level;
3017 mddev->new_layout = mddev->layout;
3018 mddev->new_chunk_sectors = mddev->chunk_sectors;
3019 mddev->raid_disks -= mddev->delta_disks;
3020 mddev->delta_disks = 0;
3021 module_put(pers->owner);
3022 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3023 mdname(mddev), clevel);
3024 return PTR_ERR(priv);
3027 /* Looks like we have a winner */
3028 mddev_suspend(mddev);
3029 mddev->pers->stop(mddev);
3031 if (mddev->pers->sync_request == NULL &&
3032 pers->sync_request != NULL) {
3033 /* need to add the md_redundancy_group */
3034 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3036 "md: cannot register extra attributes for %s\n",
3038 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3040 if (mddev->pers->sync_request != NULL &&
3041 pers->sync_request == NULL) {
3042 /* need to remove the md_redundancy_group */
3043 if (mddev->to_remove == NULL)
3044 mddev->to_remove = &md_redundancy_group;
3047 if (mddev->pers->sync_request == NULL &&
3049 /* We are converting from a no-redundancy array
3050 * to a redundancy array and metadata is managed
3051 * externally so we need to be sure that writes
3052 * won't block due to a need to transition
3054 * until external management is started.
3057 mddev->safemode_delay = 0;
3058 mddev->safemode = 0;
3061 module_put(mddev->pers->owner);
3062 /* Invalidate devices that are now superfluous */
3063 list_for_each_entry(rdev, &mddev->disks, same_set)
3064 if (rdev->raid_disk >= mddev->raid_disks) {
3065 rdev->raid_disk = -1;
3066 clear_bit(In_sync, &rdev->flags);
3069 mddev->private = priv;
3070 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3071 mddev->level = mddev->new_level;
3072 mddev->layout = mddev->new_layout;
3073 mddev->chunk_sectors = mddev->new_chunk_sectors;
3074 mddev->delta_disks = 0;
3075 if (mddev->pers->sync_request == NULL) {
3076 /* this is now an array without redundancy, so
3077 * it must always be in_sync
3080 del_timer_sync(&mddev->safemode_timer);
3083 mddev_resume(mddev);
3084 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3085 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3086 md_wakeup_thread(mddev->thread);
3087 sysfs_notify(&mddev->kobj, NULL, "level");
3088 md_new_event(mddev);
3092 static struct md_sysfs_entry md_level =
3093 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
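/* Illustrative walk-through: writing "raid5" to the level file loads
 * md-raid5 if needed, looks up the personality, and calls
 * pers->takeover(mddev); on success the old personality is stopped
 * and the new one inherits the (possibly updated) new_* geometry.
 */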
3097 layout_show(mddev_t *mddev, char *page)
3099 /* just a number, not meaningful for all levels */
3100 if (mddev->reshape_position != MaxSector &&
3101 mddev->layout != mddev->new_layout)
3102 return sprintf(page, "%d (%d)\n",
3103 mddev->new_layout, mddev->layout);
3104 return sprintf(page, "%d\n", mddev->layout);
3108 layout_store(mddev_t *mddev, const char *buf, size_t len)
3111 unsigned long n = simple_strtoul(buf, &e, 10);
3113 if (!*buf || (*e && *e != '\n'))
3118 if (mddev->pers->check_reshape == NULL)
3120 mddev->new_layout = n;
3121 err = mddev->pers->check_reshape(mddev);
3123 mddev->new_layout = mddev->layout;
3127 mddev->new_layout = n;
3128 if (mddev->reshape_position == MaxSector)
3133 static struct md_sysfs_entry md_layout =
3134 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3138 raid_disks_show(mddev_t *mddev, char *page)
3140 if (mddev->raid_disks == 0)
3142 if (mddev->reshape_position != MaxSector &&
3143 mddev->delta_disks != 0)
3144 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3145 mddev->raid_disks - mddev->delta_disks);
3146 return sprintf(page, "%d\n", mddev->raid_disks);
3149 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3152 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3156 unsigned long n = simple_strtoul(buf, &e, 10);
3158 if (!*buf || (*e && *e != '\n'))
3162 rv = update_raid_disks(mddev, n);
3163 else if (mddev->reshape_position != MaxSector) {
3164 int olddisks = mddev->raid_disks - mddev->delta_disks;
3165 mddev->delta_disks = n - olddisks;
3166 mddev->raid_disks = n;
3168 mddev->raid_disks = n;
3169 return rv ? rv : len;
3171 static struct md_sysfs_entry md_raid_disks =
3172 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3175 chunk_size_show(mddev_t *mddev, char *page)
3177 if (mddev->reshape_position != MaxSector &&
3178 mddev->chunk_sectors != mddev->new_chunk_sectors)
3179 return sprintf(page, "%d (%d)\n",
3180 mddev->new_chunk_sectors << 9,
3181 mddev->chunk_sectors << 9);
3182 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3186 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3189 unsigned long n = simple_strtoul(buf, &e, 10);
3191 if (!*buf || (*e && *e != '\n'))
3196 if (mddev->pers->check_reshape == NULL)
3198 mddev->new_chunk_sectors = n >> 9;
3199 err = mddev->pers->check_reshape(mddev);
3201 mddev->new_chunk_sectors = mddev->chunk_sectors;
3205 mddev->new_chunk_sectors = n >> 9;
3206 if (mddev->reshape_position == MaxSector)
3207 mddev->chunk_sectors = n >> 9;
3211 static struct md_sysfs_entry md_chunk_size =
3212 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
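/* Illustrative: the sysfs value is in bytes while the driver stores
 * sectors, so writing 65536 to chunk_size yields
 * new_chunk_sectors == 65536 >> 9 == 128.
 */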
3215 resync_start_show(mddev_t *mddev, char *page)
3217 if (mddev->recovery_cp == MaxSector)
3218 return sprintf(page, "none\n");
3219 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3223 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3226 unsigned long long n = simple_strtoull(buf, &e, 10);
3230 if (cmd_match(buf, "none"))
3232 else if (!*buf || (*e && *e != '\n'))
3235 mddev->recovery_cp = n;
3238 static struct md_sysfs_entry md_resync_start =
3239 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3242 * The array state can be:
3245 * No devices, no size, no level
3246 * Equivalent to STOP_ARRAY ioctl
3248 * May have some settings, but array is not active
3249 * all IO results in error
3250 * When written, doesn't tear down array, but just stops it
3251 * suspended (not supported yet)
3252 * All IO requests will block. The array can be reconfigured.
3253 * Writing this, if accepted, will block until array is quiescent
3255 * no resync can happen. no superblocks get written.
3256 * write requests fail
3258 * like readonly, but behaves like 'clean' on a write request.
3260 * clean - no pending writes, but otherwise active.
3261 * When written to inactive array, starts without resync
3262 * If a write request arrives then
3263 * if metadata is known, mark 'dirty' and switch to 'active'.
3264 * if not known, block and switch to write-pending
3265 * If written to an active array that has pending writes, then fails.
3267 * fully active: IO and resync can be happening.
3268 * When written to inactive array, starts with resync
3271 * clean, but writes are blocked waiting for 'active' to be written.
3274 * like active, but no writes have been seen for a while (100msec).
3277 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3278 write_pending, active_idle, bad_word};
3279 static char *array_states[] = {
3280 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3281 "write-pending", "active-idle", NULL };
3283 static int match_word(const char *word, char **list)
3286 for (n=0; list[n]; n++)
3287 if (cmd_match(word, list[n]))
3293 array_state_show(mddev_t *mddev, char *page)
3295 enum array_state st = inactive;
3308 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3310 else if (mddev->safemode)
3316 if (list_empty(&mddev->disks) &&
3317 mddev->raid_disks == 0 &&
3318 mddev->dev_sectors == 0)
3323 return sprintf(page, "%s\n", array_states[st]);
3326 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3327 static int md_set_readonly(mddev_t * mddev, int is_open);
3328 static int do_md_run(mddev_t * mddev);
3329 static int restart_array(mddev_t *mddev);
3332 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3335 enum array_state st = match_word(buf, array_states);
3340 /* stopping an active array */
3341 if (atomic_read(&mddev->openers) > 0)
3343 err = do_md_stop(mddev, 0, 0);
3346 /* stopping an active array */
3348 if (atomic_read(&mddev->openers) > 0)
3350 err = do_md_stop(mddev, 2, 0);
3352 err = 0; /* already inactive */
3355 break; /* not supported yet */
3358 err = md_set_readonly(mddev, 0);
3361 set_disk_ro(mddev->gendisk, 1);
3362 err = do_md_run(mddev);
3368 err = md_set_readonly(mddev, 0);
3369 else if (mddev->ro == 1)
3370 err = restart_array(mddev);
3373 set_disk_ro(mddev->gendisk, 0);
3377 err = do_md_run(mddev);
3382 restart_array(mddev);
3383 spin_lock_irq(&mddev->write_lock);
3384 if (atomic_read(&mddev->writes_pending) == 0) {
3385 if (mddev->in_sync == 0) {
3387 if (mddev->safemode == 1)
3388 mddev->safemode = 0;
3389 if (mddev->persistent)
3390 set_bit(MD_CHANGE_CLEAN,
3396 spin_unlock_irq(&mddev->write_lock);
3402 restart_array(mddev);
3403 if (mddev->external)
3404 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3405 wake_up(&mddev->sb_wait);
3409 set_disk_ro(mddev->gendisk, 0);
3410 err = do_md_run(mddev);
3415 /* these cannot be set */
3421 sysfs_notify_dirent(mddev->sysfs_state);
3425 static struct md_sysfs_entry md_array_state =
3426 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
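/* Illustrative transitions through array_state_store() above:
 *   "inactive"  -> do_md_stop(mddev, 2, 0)  (stop, keep configuration)
 *   "clear"     -> do_md_stop(mddev, 0, 0)  (full teardown)
 *   "read-auto" -> read-only until the first write is seen
 */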
3429 max_corrected_read_errors_show(mddev_t *mddev, char *page) {
3430 return sprintf(page, "%d\n",
3431 atomic_read(&mddev->max_corr_read_errors));
3435 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3438 unsigned long n = simple_strtoul(buf, &e, 10);
3440 if (*buf && (*e == 0 || *e == '\n')) {
3441 atomic_set(&mddev->max_corr_read_errors, n);
3447 static struct md_sysfs_entry max_corr_read_errors =
3448 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3449 max_corrected_read_errors_store);
3452 null_show(mddev_t *mddev, char *page)
3458 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3460 /* buf must be %d:%d\n? giving major and minor numbers */
3461 /* The new device is added to the array.
3462 * If the array has a persistent superblock, we read the
3463 * superblock to initialise info and check validity.
3464 * Otherwise, the only checking done is that in bind_rdev_to_array,
3465 * which mainly checks size.
3468 int major = simple_strtoul(buf, &e, 10);
3474 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3476 minor = simple_strtoul(e+1, &e, 10);
3477 if (*e && *e != '\n')
3479 dev = MKDEV(major, minor);
3480 if (major != MAJOR(dev) ||
3481 minor != MINOR(dev))
3485 if (mddev->persistent) {
3486 rdev = md_import_device(dev, mddev->major_version,
3487 mddev->minor_version);
3488 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3489 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3490 mdk_rdev_t, same_set);
3491 err = super_types[mddev->major_version]
3492 .load_super(rdev, rdev0, mddev->minor_version);
3496 } else if (mddev->external)
3497 rdev = md_import_device(dev, -2, -1);
3499 rdev = md_import_device(dev, -1, -1);
3502 return PTR_ERR(rdev);
3503 err = bind_rdev_to_array(rdev, mddev);
3507 return err ? err : len;
3510 static struct md_sysfs_entry md_new_device =
3511 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
3514 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3517 unsigned long chunk, end_chunk;
3521 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3523 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3524 if (buf == end) break;
3525 if (*end == '-') { /* range */
3527 end_chunk = simple_strtoul(buf, &end, 0);
3528 if (buf == end) break;
3530 if (*end && !isspace(*end)) break;
3531 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3532 buf = skip_spaces(end);
3534 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3539 static struct md_sysfs_entry md_bitmap =
3540 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
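/* Illustrative input for bitmap_set_bits: writing "10-20 35" marks
 * chunks 10 through 20 and chunk 35 dirty, after which
 * bitmap_unplug() flushes the updated bits to disk.
 */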
3543 size_show(mddev_t *mddev, char *page)
3545 return sprintf(page, "%llu\n",
3546 (unsigned long long)mddev->dev_sectors / 2);
3549 static int update_size(mddev_t *mddev, sector_t num_sectors);
3552 size_store(mddev_t *mddev, const char *buf, size_t len)
3554 /* If array is inactive, we can reduce the component size, but
3555 * not increase it (except from 0).
3556 * If array is active, we can try an on-line resize
3559 int err = strict_blocks_to_sectors(buf, &sectors);
3564 err = update_size(mddev, sectors);
3565 md_update_sb(mddev, 1);
3567 if (mddev->dev_sectors == 0 ||
3568 mddev->dev_sectors > sectors)
3569 mddev->dev_sectors = sectors;
3573 return err ? err : len;
3576 static struct md_sysfs_entry md_size =
3577 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3582 * 'none' for arrays with no metadata (good luck...)
3583 * 'external' for arrays with externally managed metadata,
3584 * or N.M for internally known formats
3587 metadata_show(mddev_t *mddev, char *page)
3589 if (mddev->persistent)
3590 return sprintf(page, "%d.%d\n",
3591 mddev->major_version, mddev->minor_version);
3592 else if (mddev->external)
3593 return sprintf(page, "external:%s\n", mddev->metadata_type);
3595 return sprintf(page, "none\n");
3599 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3603 /* Changing the details of 'external' metadata is
3604 * always permitted. Otherwise there must be
3605 * no devices attached to the array.
3607 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3609 else if (!list_empty(&mddev->disks))
3612 if (cmd_match(buf, "none")) {
3613 mddev->persistent = 0;
3614 mddev->external = 0;
3615 mddev->major_version = 0;
3616 mddev->minor_version = 90;
3619 if (strncmp(buf, "external:", 9) == 0) {
3620 size_t namelen = len-9;
3621 if (namelen >= sizeof(mddev->metadata_type))
3622 namelen = sizeof(mddev->metadata_type)-1;
3623 strncpy(mddev->metadata_type, buf+9, namelen);
3624 mddev->metadata_type[namelen] = 0;
3625 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3626 mddev->metadata_type[--namelen] = 0;
3627 mddev->persistent = 0;
3628 mddev->external = 1;
3629 mddev->major_version = 0;
3630 mddev->minor_version = 90;
3633 major = simple_strtoul(buf, &e, 10);
3634 if (e==buf || *e != '.')
3637 minor = simple_strtoul(buf, &e, 10);
3638 if (e==buf || (*e && *e != '\n') )
3640 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3642 mddev->major_version = major;
3643 mddev->minor_version = minor;
3644 mddev->persistent = 1;
3645 mddev->external = 0;
3649 static struct md_sysfs_entry md_metadata =
3650 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
3653 action_show(mddev_t *mddev, char *page)
3655 char *type = "idle";
3656 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3658 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3659 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3660 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3662 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3663 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3665 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3669 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3672 return sprintf(page, "%s\n", type);
3676 action_store(mddev_t *mddev, const char *page, size_t len)
3678 if (!mddev->pers || !mddev->pers->sync_request)
3681 if (cmd_match(page, "frozen"))
3682 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3684 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3686 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3687 if (mddev->sync_thread) {
3688 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3689 md_unregister_thread(mddev->sync_thread);
3690 mddev->sync_thread = NULL;
3691 mddev->recovery = 0;
3693 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3694 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3696 else if (cmd_match(page, "resync"))
3697 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3698 else if (cmd_match(page, "recover")) {
3699 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3700 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3701 } else if (cmd_match(page, "reshape")) {
3703 if (mddev->pers->start_reshape == NULL)
3705 err = mddev->pers->start_reshape(mddev);
3708 sysfs_notify(&mddev->kobj, NULL, "degraded");
3710 if (cmd_match(page, "check"))
3711 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3712 else if (!cmd_match(page, "repair"))
3714 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3715 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3717 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3718 md_wakeup_thread(mddev->thread);
3719 sysfs_notify_dirent(mddev->sysfs_action);
3724 mismatch_cnt_show(mddev_t *mddev, char *page)
3726 return sprintf(page, "%llu\n",
3727 (unsigned long long) mddev->resync_mismatches);
3730 static struct md_sysfs_entry md_scan_mode =
3731 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
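/* Illustrative: writing "check" sets MD_RECOVERY_CHECK together with
 * MD_RECOVERY_REQUESTED and MD_RECOVERY_SYNC (a read-only scrub),
 * while "repair" sets only REQUESTED|SYNC so mismatches get fixed.
 */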
3734 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3737 sync_min_show(mddev_t *mddev, char *page)
3739 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3740 mddev->sync_speed_min ? "local": "system");
3744 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3748 if (strncmp(buf, "system", 6)==0) {
3749 mddev->sync_speed_min = 0;
3752 min = simple_strtoul(buf, &e, 10);
3753 if (buf == e || (*e && *e != '\n') || min <= 0)
3755 mddev->sync_speed_min = min;
3759 static struct md_sysfs_entry md_sync_min =
3760 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3763 sync_max_show(mddev_t *mddev, char *page)
3765 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3766 mddev->sync_speed_max ? "local": "system");
3770 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3774 if (strncmp(buf, "system", 6)==0) {
3775 mddev->sync_speed_max = 0;
3778 max = simple_strtoul(buf, &e, 10);
3779 if (buf == e || (*e && *e != '\n') || max <= 0)
3781 mddev->sync_speed_max = max;
3785 static struct md_sysfs_entry md_sync_max =
3786 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3789 degraded_show(mddev_t *mddev, char *page)
3791 return sprintf(page, "%d\n", mddev->degraded);
3793 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3796 sync_force_parallel_show(mddev_t *mddev, char *page)
3798 return sprintf(page, "%d\n", mddev->parallel_resync);
3802 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3806 if (strict_strtol(buf, 10, &n))
3809 if (n != 0 && n != 1)
3812 mddev->parallel_resync = n;
3814 if (mddev->sync_thread)
3815 wake_up(&resync_wait);
3820 /* force parallel resync, even with shared block devices */
3821 static struct md_sysfs_entry md_sync_force_parallel =
3822 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3823 sync_force_parallel_show, sync_force_parallel_store);
3826 sync_speed_show(mddev_t *mddev, char *page)
3828 unsigned long resync, dt, db;
3829 if (mddev->curr_resync == 0)
3830 return sprintf(page, "none\n");
3831 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3832 dt = (jiffies - mddev->resync_mark) / HZ;
3834 db = resync - mddev->resync_mark_cnt;
3835 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3838 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
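/* Worked example (illustrative): if db == 204800 sectors completed
 * over dt == 10 seconds, sync_speed reports 204800/10/2 == 10240,
 * i.e. 10240 K/sec (the /2 converts 512-byte sectors to KiB).
 */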
3841 sync_completed_show(mddev_t *mddev, char *page)
3843 unsigned long max_sectors, resync;
3845 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3846 return sprintf(page, "none\n");
3848 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3849 max_sectors = mddev->resync_max_sectors;
3851 max_sectors = mddev->dev_sectors;
3853 resync = mddev->curr_resync_completed;
3854 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3857 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3860 min_sync_show(mddev_t *mddev, char *page)
3862 return sprintf(page, "%llu\n",
3863 (unsigned long long)mddev->resync_min);
3866 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3868 unsigned long long min;
3869 if (strict_strtoull(buf, 10, &min))
3871 if (min > mddev->resync_max)
3873 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3876 /* Must be a multiple of chunk_size */
3877 if (mddev->chunk_sectors) {
3878 sector_t temp = min;
3879 if (sector_div(temp, mddev->chunk_sectors))
3882 mddev->resync_min = min;
3887 static struct md_sysfs_entry md_min_sync =
3888 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3891 max_sync_show(mddev_t *mddev, char *page)
3893 if (mddev->resync_max == MaxSector)
3894 return sprintf(page, "max\n");
3896 return sprintf(page, "%llu\n",
3897 (unsigned long long)mddev->resync_max);
3900 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3902 if (strncmp(buf, "max", 3) == 0)
3903 mddev->resync_max = MaxSector;
3905 unsigned long long max;
3906 if (strict_strtoull(buf, 10, &max))
3908 if (max < mddev->resync_min)
3910 if (max < mddev->resync_max &&
3912 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3915 /* Must be a multiple of chunk_size */
3916 if (mddev->chunk_sectors) {
3917 sector_t temp = max;
3918 if (sector_div(temp, mddev->chunk_sectors))
3921 mddev->resync_max = max;
3923 wake_up(&mddev->recovery_wait);
3927 static struct md_sysfs_entry md_max_sync =
3928 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3931 suspend_lo_show(mddev_t *mddev, char *page)
3933 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3937 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3940 unsigned long long new = simple_strtoull(buf, &e, 10);
3942 if (mddev->pers == NULL ||
3943 mddev->pers->quiesce == NULL)
3945 if (buf == e || (*e && *e != '\n'))
3947 if (new >= mddev->suspend_hi ||
3948 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3949 mddev->suspend_lo = new;
3950 mddev->pers->quiesce(mddev, 2);
3955 static struct md_sysfs_entry md_suspend_lo =
3956 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3960 suspend_hi_show(mddev_t *mddev, char *page)
3962 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3966 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3969 unsigned long long new = simple_strtoull(buf, &e, 10);
3971 if (mddev->pers == NULL ||
3972 mddev->pers->quiesce == NULL)
3974 if (buf == e || (*e && *e != '\n'))
3976 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3977 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3978 mddev->suspend_hi = new;
3979 mddev->pers->quiesce(mddev, 1);
3980 mddev->pers->quiesce(mddev, 0);
3985 static struct md_sysfs_entry md_suspend_hi =
3986 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3989 reshape_position_show(mddev_t *mddev, char *page)
3991 if (mddev->reshape_position != MaxSector)
3992 return sprintf(page, "%llu\n",
3993 (unsigned long long)mddev->reshape_position);
3994 strcpy(page, "none\n");
3999 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
4002 unsigned long long new = simple_strtoull(buf, &e, 10);
4005 if (buf == e || (*e && *e != '\n'))
4007 mddev->reshape_position = new;
4008 mddev->delta_disks = 0;
4009 mddev->new_level = mddev->level;
4010 mddev->new_layout = mddev->layout;
4011 mddev->new_chunk_sectors = mddev->chunk_sectors;
4015 static struct md_sysfs_entry md_reshape_position =
4016 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4017 reshape_position_store);
4020 array_size_show(mddev_t *mddev, char *page)
4022 if (mddev->external_size)
4023 return sprintf(page, "%llu\n",
4024 (unsigned long long)mddev->array_sectors/2);
4026 return sprintf(page, "default\n");
4030 array_size_store(mddev_t *mddev, const char *buf, size_t len)
4034 if (strncmp(buf, "default", 7) == 0) {
4036 sectors = mddev->pers->size(mddev, 0, 0);
4038 sectors = mddev->array_sectors;
4040 mddev->external_size = 0;
4042 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4044 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4047 mddev->external_size = 1;
4050 mddev->array_sectors = sectors;
4051 set_capacity(mddev->gendisk, mddev->array_sectors);
4053 revalidate_disk(mddev->gendisk);
4058 static struct md_sysfs_entry md_array_size =
4059 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4062 static struct attribute *md_default_attrs[] = {
4065 &md_raid_disks.attr,
4066 &md_chunk_size.attr,
4068 &md_resync_start.attr,
4070 &md_new_device.attr,
4071 &md_safe_delay.attr,
4072 &md_array_state.attr,
4073 &md_reshape_position.attr,
4074 &md_array_size.attr,
4075 &max_corr_read_errors.attr,
4079 static struct attribute *md_redundancy_attrs[] = {
4081 &md_mismatches.attr,
4084 &md_sync_speed.attr,
4085 &md_sync_force_parallel.attr,
4086 &md_sync_completed.attr,
4089 &md_suspend_lo.attr,
4090 &md_suspend_hi.attr,
4095 static struct attribute_group md_redundancy_group = {
4097 .attrs = md_redundancy_attrs,
4102 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4104 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4105 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4110 rv = mddev_lock(mddev);
4112 rv = entry->show(mddev, page);
4113 mddev_unlock(mddev);
4119 md_attr_store(struct kobject *kobj, struct attribute *attr,
4120 const char *page, size_t length)
4122 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4123 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4128 if (!capable(CAP_SYS_ADMIN))
4130 rv = mddev_lock(mddev);
4131 if (mddev->hold_active == UNTIL_IOCTL)
4132 mddev->hold_active = 0;
4134 rv = entry->store(mddev, page, length);
4135 mddev_unlock(mddev);
4140 static void md_free(struct kobject *ko)
4142 mddev_t *mddev = container_of(ko, mddev_t, kobj);
4144 if (mddev->sysfs_state)
4145 sysfs_put(mddev->sysfs_state);
4147 if (mddev->gendisk) {
4148 del_gendisk(mddev->gendisk);
4149 put_disk(mddev->gendisk);
4152 blk_cleanup_queue(mddev->queue);
4157 static struct sysfs_ops md_sysfs_ops = {
4158 .show = md_attr_show,
4159 .store = md_attr_store,
4161 static struct kobj_type md_ktype = {
4163 .sysfs_ops = &md_sysfs_ops,
4164 .default_attrs = md_default_attrs,
4169 static void mddev_delayed_delete(struct work_struct *ws)
4171 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4173 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4174 kobject_del(&mddev->kobj);
4175 kobject_put(&mddev->kobj);
4178 static int md_alloc(dev_t dev, char *name)
4180 static DEFINE_MUTEX(disks_mutex);
4181 mddev_t *mddev = mddev_find(dev);
4182 struct gendisk *disk;
4191 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4192 shift = partitioned ? MdpMinorShift : 0;
4193 unit = MINOR(mddev->unit) >> shift;
4195 /* wait for any previous instance of this device
4196 * to be completely removed (mddev_delayed_delete).
4197 */
4198 flush_scheduled_work();
4200 mutex_lock(&disks_mutex);
4206 /* Need to ensure that 'name' is not a duplicate.
4209 spin_lock(&all_mddevs_lock);
4211 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4212 if (mddev2->gendisk &&
4213 strcmp(mddev2->gendisk->disk_name, name) == 0) {
4214 spin_unlock(&all_mddevs_lock);
4217 spin_unlock(&all_mddevs_lock);
4221 mddev->queue = blk_alloc_queue(GFP_KERNEL);
4224 mddev->queue->queuedata = mddev;
4226 /* Can be unlocked because the queue is new: no concurrency */
4227 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
4229 blk_queue_make_request(mddev->queue, md_make_request);
4231 disk = alloc_disk(1 << shift);
4233 blk_cleanup_queue(mddev->queue);
4234 mddev->queue = NULL;
4237 disk->major = MAJOR(mddev->unit);
4238 disk->first_minor = unit << shift;
4240 strcpy(disk->disk_name, name);
4241 else if (partitioned)
4242 sprintf(disk->disk_name, "md_d%d", unit);
4244 sprintf(disk->disk_name, "md%d", unit);
4245 disk->fops = &md_fops;
4246 disk->private_data = mddev;
4247 disk->queue = mddev->queue;
4248 /* Allow extended partitions. This makes the
4249 * 'mdp' device redundant, but we can't really
4250 * remove it now.
4251 */
4252 disk->flags |= GENHD_FL_EXT_DEVT;
4254 mddev->gendisk = disk;
4255 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4256 &disk_to_dev(disk)->kobj, "%s", "md");
4258 /* This isn't possible, but as kobject_init_and_add is marked
4259 * __must_check, we must do something with the result
4261 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4265 if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4266 printk(KERN_DEBUG "pointless warning\n");
4268 mutex_unlock(&disks_mutex);
4270 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4271 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
4277 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4279 md_alloc(dev, NULL);
4283 static int add_named_array(const char *val, struct kernel_param *kp)
4285 /* val must be "md_*" where * is not all digits.
4286 * We allocate an array with a large free minor number, and
4287 * set the name to val. val must not already be an active name.
4289 int len = strlen(val);
4290 char buf[DISK_NAME_LEN];
4292 while (len && val[len-1] == '\n')
4294 if (len >= DISK_NAME_LEN)
4296 strlcpy(buf, val, len+1);
4297 if (strncmp(buf, "md_", 3) != 0)
4299 return md_alloc(0, buf);
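/* Illustrative use, assuming this handler is exposed as a writable
 * module parameter (the parameter name "new_array" is an assumption):
 *   # echo md_home > /sys/module/md_mod/parameters/new_array
 * creates a new array named "md_home" on a free minor.
 */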
4302 static void md_safemode_timeout(unsigned long data)
4304 mddev_t *mddev = (mddev_t *) data;
4306 if (!atomic_read(&mddev->writes_pending)) {
4307 mddev->safemode = 1;
4308 if (mddev->external)
4309 sysfs_notify_dirent(mddev->sysfs_state);
4311 md_wakeup_thread(mddev->thread);
4314 static int start_dirty_degraded;
4316 static int md_run(mddev_t *mddev)
4320 struct mdk_personality *pers;
4322 if (list_empty(&mddev->disks))
4323 /* cannot run an array with no devices.. */
4329 /* These two calls synchronise us with the
4330 * sysfs_remove_group calls in mddev_unlock,
4331 * so they must have completed.
4333 mutex_lock(&mddev->open_mutex);
4334 mutex_unlock(&mddev->open_mutex);
4337 * Analyze all RAID superblock(s)
4339 if (!mddev->raid_disks) {
4340 if (!mddev->persistent)
4345 if (mddev->level != LEVEL_NONE)
4346 request_module("md-level-%d", mddev->level);
4347 else if (mddev->clevel[0])
4348 request_module("md-%s", mddev->clevel);
4351 * Drop all container device buffers, from now on
4352 * the only valid external interface is through the md
4355 list_for_each_entry(rdev, &mddev->disks, same_set) {
4356 if (test_bit(Faulty, &rdev->flags))
4358 sync_blockdev(rdev->bdev);
4359 invalidate_bdev(rdev->bdev);
4361 /* perform some consistency tests on the device.
4362 * We don't want the data to overlap the metadata.
4363 * Internal bitmap issues have been handled elsewhere.
4365 if (rdev->data_offset < rdev->sb_start) {
4366 if (mddev->dev_sectors &&
4367 rdev->data_offset + mddev->dev_sectors
4369 printk("md: %s: data overlaps metadata\n",
4374 if (rdev->sb_start + rdev->sb_size/512
4375 > rdev->data_offset) {
4376 printk("md: %s: metadata overlaps data\n",
4381 sysfs_notify_dirent(rdev->sysfs_state);
4384 spin_lock(&pers_lock);
4385 pers = find_pers(mddev->level, mddev->clevel);
4386 if (!pers || !try_module_get(pers->owner)) {
4387 spin_unlock(&pers_lock);
4388 if (mddev->level != LEVEL_NONE)
4389 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4392 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4397 spin_unlock(&pers_lock);
4398 if (mddev->level != pers->level) {
4399 mddev->level = pers->level;
4400 mddev->new_level = pers->level;
4402 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4404 if (mddev->reshape_position != MaxSector &&
4405 pers->start_reshape == NULL) {
4406 /* This personality cannot handle reshaping... */
4408 module_put(pers->owner);
4412 if (pers->sync_request) {
4413 /* Warn if this is a potentially silly
4414 * configuration.
4415 */
4416 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4420 list_for_each_entry(rdev, &mddev->disks, same_set)
4421 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4423 rdev->bdev->bd_contains ==
4424 rdev2->bdev->bd_contains) {
4426 "%s: WARNING: %s appears to be"
4427 " on the same physical disk as"
4430 bdevname(rdev->bdev,b),
4431 bdevname(rdev2->bdev,b2));
4438 "True protection against single-disk"
4439 " failure might be compromised.\n");
4442 mddev->recovery = 0;
4443 /* may be overridden by personality */
4444 mddev->resync_max_sectors = mddev->dev_sectors;
4446 mddev->barriers_work = 1;
4447 mddev->ok_start_degraded = start_dirty_degraded;
4449 if (start_readonly && mddev->ro == 0)
4450 mddev->ro = 2; /* read-only, but switch on first write */
4452 err = mddev->pers->run(mddev);
4454 printk(KERN_ERR "md: pers->run() failed ...\n");
4455 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4456 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4457 " but 'external_size' not in effect?\n", __func__);
4459 "md: invalid array_size %llu > default size %llu\n",
4460 (unsigned long long)mddev->array_sectors / 2,
4461 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4463 mddev->pers->stop(mddev);
4465 if (err == 0 && mddev->pers->sync_request) {
4466 err = bitmap_create(mddev);
4468 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4469 mdname(mddev), err);
4470 mddev->pers->stop(mddev);
4474 module_put(mddev->pers->owner);
4476 bitmap_destroy(mddev);
4479 if (mddev->pers->sync_request) {
4480 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4482 "md: cannot register extra attributes for %s\n",
4484 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4485 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4488 atomic_set(&mddev->writes_pending,0);
4489 atomic_set(&mddev->max_corr_read_errors,
4490 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4491 mddev->safemode = 0;
4492 mddev->safemode_timer.function = md_safemode_timeout;
4493 mddev->safemode_timer.data = (unsigned long) mddev;
4494 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4497 list_for_each_entry(rdev, &mddev->disks, same_set)
4498 if (rdev->raid_disk >= 0) {
4500 sprintf(nm, "rd%d", rdev->raid_disk);
4501 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4502 printk("md: cannot register %s for %s\n",
4506 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4509 md_update_sb(mddev, 0);
4511 md_wakeup_thread(mddev->thread);
4512 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4514 md_new_event(mddev);
4515 sysfs_notify_dirent(mddev->sysfs_state);
4516 if (mddev->sysfs_action)
4517 sysfs_notify_dirent(mddev->sysfs_action);
4518 sysfs_notify(&mddev->kobj, NULL, "degraded");
4522 static int do_md_run(mddev_t *mddev)
4526 err = md_run(mddev);
4530 set_capacity(mddev->gendisk, mddev->array_sectors);
4531 revalidate_disk(mddev->gendisk);
4532 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4537 static int restart_array(mddev_t *mddev)
4539 struct gendisk *disk = mddev->gendisk;
4541 /* Complain if it has no devices */
4542 if (list_empty(&mddev->disks))
4548 mddev->safemode = 0;
4550 set_disk_ro(disk, 0);
4551 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4553 /* Kick recovery or resync if necessary */
4554 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4555 md_wakeup_thread(mddev->thread);
4556 md_wakeup_thread(mddev->sync_thread);
4557 sysfs_notify_dirent(mddev->sysfs_state);
4561 /* similar to deny_write_access, but accounts for our holding a reference
4562 * to the file ourselves */
4563 static int deny_bitmap_write_access(struct file * file)
4565 struct inode *inode = file->f_mapping->host;
4567 spin_lock(&inode->i_lock);
4568 if (atomic_read(&inode->i_writecount) > 1) {
4569 spin_unlock(&inode->i_lock);
4572 atomic_set(&inode->i_writecount, -1);
4573 spin_unlock(&inode->i_lock);
4578 void restore_bitmap_write_access(struct file *file)
4580 struct inode *inode = file->f_mapping->host;
4582 spin_lock(&inode->i_lock);
4583 atomic_set(&inode->i_writecount, 1);
4584 spin_unlock(&inode->i_lock);
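/* The i_writecount convention used above (illustrative): a value of
 * -1 means "writes denied".  deny_bitmap_write_access() succeeds only
 * while we hold the sole writer reference (count == 1), flipping it
 * to -1; restore_bitmap_write_access() flips it back to 1.
 */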
4587 static void md_clean(mddev_t *mddev)
4589 mddev->array_sectors = 0;
4590 mddev->external_size = 0;
4591 mddev->dev_sectors = 0;
4592 mddev->raid_disks = 0;
4593 mddev->recovery_cp = 0;
4594 mddev->resync_min = 0;
4595 mddev->resync_max = MaxSector;
4596 mddev->reshape_position = MaxSector;
4597 mddev->external = 0;
4598 mddev->persistent = 0;
4599 mddev->level = LEVEL_NONE;
4600 mddev->clevel[0] = 0;
4603 mddev->metadata_type[0] = 0;
4604 mddev->chunk_sectors = 0;
4605 mddev->ctime = mddev->utime = 0;
4607 mddev->max_disks = 0;
4609 mddev->delta_disks = 0;
4610 mddev->new_level = LEVEL_NONE;
4611 mddev->new_layout = 0;
4612 mddev->new_chunk_sectors = 0;
4613 mddev->curr_resync = 0;
4614 mddev->resync_mismatches = 0;
4615 mddev->suspend_lo = mddev->suspend_hi = 0;
4616 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4617 mddev->recovery = 0;
4619 mddev->degraded = 0;
4620 mddev->barriers_work = 0;
4621 mddev->safemode = 0;
4622 mddev->bitmap_info.offset = 0;
4623 mddev->bitmap_info.default_offset = 0;
4624 mddev->bitmap_info.chunksize = 0;
4625 mddev->bitmap_info.daemon_sleep = 0;
4626 mddev->bitmap_info.max_write_behind = 0;
4629 static void md_stop_writes(mddev_t *mddev)
4631 if (mddev->sync_thread) {
4632 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4633 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4634 md_unregister_thread(mddev->sync_thread);
4635 mddev->sync_thread = NULL;
4638 del_timer_sync(&mddev->safemode_timer);
4640 bitmap_flush(mddev);
4641 md_super_wait(mddev);
4643 if (!mddev->in_sync || mddev->flags) {
4644 /* mark array as shut down cleanly */
4646 md_update_sb(mddev, 1);
4650 static void md_stop(mddev_t *mddev)
4652 md_stop_writes(mddev);
4654 mddev->pers->stop(mddev);
4655 if (mddev->pers->sync_request && mddev->to_remove == NULL)
4656 mddev->to_remove = &md_redundancy_group;
4657 module_put(mddev->pers->owner);
4659 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4662 static int md_set_readonly(mddev_t *mddev, int is_open)
4665 mutex_lock(&mddev->open_mutex);
4666 if (atomic_read(&mddev->openers) > is_open) {
4667 printk("md: %s still in use.\n",mdname(mddev));
4672 md_stop_writes(mddev);
4678 set_disk_ro(mddev->gendisk, 1);
4679 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4680 sysfs_notify_dirent(mddev->sysfs_state);
4684 mutex_unlock(&mddev->open_mutex);
4689 * 0 - completely stop and disassemble array
4690 * 2 - stop but do not disassemble array
4692 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4695 struct gendisk *disk = mddev->gendisk;
4698 mutex_lock(&mddev->open_mutex);
4699 if (atomic_read(&mddev->openers) > is_open) {
4700 printk("md: %s still in use.\n",mdname(mddev));
4702 } else if (mddev->pers) {
4705 set_disk_ro(disk, 0);
4708 mddev->queue->merge_bvec_fn = NULL;
4709 mddev->queue->unplug_fn = NULL;
4710 mddev->queue->backing_dev_info.congested_fn = NULL;
4712 /* tell userspace to handle 'inactive' */
4713 sysfs_notify_dirent(mddev->sysfs_state);
4715 list_for_each_entry(rdev, &mddev->disks, same_set)
4716 if (rdev->raid_disk >= 0) {
4718 sprintf(nm, "rd%d", rdev->raid_disk);
4719 sysfs_remove_link(&mddev->kobj, nm);
4722 set_capacity(disk, 0);
4723 revalidate_disk(disk);
4730 mutex_unlock(&mddev->open_mutex);
4734 * Free resources if final stop
4738 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4740 bitmap_destroy(mddev);
4741 if (mddev->bitmap_info.file) {
4742 restore_bitmap_write_access(mddev->bitmap_info.file);
4743 fput(mddev->bitmap_info.file);
4744 mddev->bitmap_info.file = NULL;
4746 mddev->bitmap_info.offset = 0;
4748 export_array(mddev);
4751 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4752 if (mddev->hold_active == UNTIL_STOP)
4753 mddev->hold_active = 0;
4757 blk_integrity_unregister(disk);
4758 md_new_event(mddev);
4759 sysfs_notify_dirent(mddev->sysfs_state);
4764 static void autorun_array(mddev_t *mddev)
4769 if (list_empty(&mddev->disks))
4772 printk(KERN_INFO "md: running: ");
4774 list_for_each_entry(rdev, &mddev->disks, same_set) {
4775 char b[BDEVNAME_SIZE];
4776 printk("<%s>", bdevname(rdev->bdev,b));
4780 err = do_md_run(mddev);
4782 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4783 do_md_stop(mddev, 0, 0);
4788 * let's try to run arrays based on all disks that have arrived
4789 * until now. (those are in pending_raid_disks)
4791 * the method: pick the first pending disk, collect all disks with
4792 * the same UUID, remove all from the pending list and put them into
4793 * the 'same_array' list. Then order this list based on superblock
4794 * update time (freshest comes first), kick out 'old' disks and
4795 * compare superblocks. If everything's fine then run it.
4797 * If "unit" is allocated, then bump its reference count
4799 static void autorun_devices(int part)
4801 mdk_rdev_t *rdev0, *rdev, *tmp;
4803 char b[BDEVNAME_SIZE];
4805 printk(KERN_INFO "md: autorun ...\n");
4806 while (!list_empty(&pending_raid_disks)) {
4809 LIST_HEAD(candidates);
4810 rdev0 = list_entry(pending_raid_disks.next,
4811 mdk_rdev_t, same_set);
4813 printk(KERN_INFO "md: considering %s ...\n",
4814 bdevname(rdev0->bdev,b));
4815 INIT_LIST_HEAD(&candidates);
4816 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4817 if (super_90_load(rdev, rdev0, 0) >= 0) {
4818 printk(KERN_INFO "md: adding %s ...\n",
4819 bdevname(rdev->bdev,b));
4820 list_move(&rdev->same_set, &candidates);
4823 * now we have a set of devices, with all of them having
4824 * mostly sane superblocks. It's time to allocate the
4825 * mddev.
4828 dev = MKDEV(mdp_major,
4829 rdev0->preferred_minor << MdpMinorShift);
4830 unit = MINOR(dev) >> MdpMinorShift;
4832 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4835 if (rdev0->preferred_minor != unit) {
4836 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4837 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4841 md_probe(dev, NULL, NULL);
4842 mddev = mddev_find(dev);
4843 if (!mddev || !mddev->gendisk) {
4847 "md: cannot allocate memory for md drive.\n");
4850 if (mddev_lock(mddev))
4851 printk(KERN_WARNING "md: %s locked, cannot run\n",
4853 else if (mddev->raid_disks || mddev->major_version
4854 || !list_empty(&mddev->disks)) {
4856 "md: %s already running, cannot run %s\n",
4857 mdname(mddev), bdevname(rdev0->bdev,b));
4858 mddev_unlock(mddev);
4860 printk(KERN_INFO "md: created %s\n", mdname(mddev));
4861 mddev->persistent = 1;
4862 rdev_for_each_list(rdev, tmp, &candidates) {
4863 list_del_init(&rdev->same_set);
4864 if (bind_rdev_to_array(rdev, mddev))
4867 autorun_array(mddev);
4868 mddev_unlock(mddev);
4870 /* on success, candidates will be empty, on error
4871 * it won't...
4872 */
4873 rdev_for_each_list(rdev, tmp, &candidates) {
4874 list_del_init(&rdev->same_set);
4879 printk(KERN_INFO "md: ... autorun DONE.\n");
4881 #endif /* !MODULE */
4883 static int get_version(void __user * arg)
4887 ver.major = MD_MAJOR_VERSION;
4888 ver.minor = MD_MINOR_VERSION;
4889 ver.patchlevel = MD_PATCHLEVEL_VERSION;
4891 if (copy_to_user(arg, &ver, sizeof(ver)))
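/*
 * Illustrative sketch (editorial, not part of md.c): how userspace can ask
 * the driver for its version through the RAID_VERSION ioctl serviced above.
 * The device path /dev/md0 is an assumption; any md node works.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_version_t ver;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RAID_VERSION, &ver) < 0) {
		perror("RAID_VERSION");
		return 1;
	}
	printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
	close(fd);
	return 0;
}
#endif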
4897 static int get_array_info(mddev_t * mddev, void __user * arg)
4899 mdu_array_info_t info;
4900 int nr,working,insync,failed,spare;
4903 nr=working=insync=failed=spare=0;
4904 list_for_each_entry(rdev, &mddev->disks, same_set) {
4906 if (test_bit(Faulty, &rdev->flags))
4910 if (test_bit(In_sync, &rdev->flags))
4917 info.major_version = mddev->major_version;
4918 info.minor_version = mddev->minor_version;
4919 info.patch_version = MD_PATCHLEVEL_VERSION;
4920 info.ctime = mddev->ctime;
4921 info.level = mddev->level;
4922 info.size = mddev->dev_sectors / 2;
4923 if (info.size != mddev->dev_sectors / 2) /* overflow */
4926 info.raid_disks = mddev->raid_disks;
4927 info.md_minor = mddev->md_minor;
4928 info.not_persistent= !mddev->persistent;
4930 info.utime = mddev->utime;
4931 info.state = 0;
4932 if (mddev->in_sync)
4933 info.state = (1<<MD_SB_CLEAN);
4934 if (mddev->bitmap && mddev->bitmap_info.offset)
4935 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4936 info.active_disks = insync;
4937 info.working_disks = working;
4938 info.failed_disks = failed;
4939 info.spare_disks = spare;
4941 info.layout = mddev->layout;
4942 info.chunk_size = mddev->chunk_sectors << 9;
4944 if (copy_to_user(arg, &info, sizeof(info)))
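/*
 * Illustrative sketch (editorial, not part of md.c): reading the array
 * summary that get_array_info() fills in above. Note 'size' is per-device
 * KiB (dev_sectors / 2), matching the conversion in the kernel code.
 * /dev/md0 is an assumption.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		return 1;
	}
	printf("level %d: %d raid disks (%d active, %d failed, %d spare), %d KiB/device\n",
	       info.level, info.raid_disks, info.active_disks,
	       info.failed_disks, info.spare_disks, info.size);
	close(fd);
	return 0;
}
#endif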
4950 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4952 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4953 char *ptr, *buf = NULL;
4956 if (md_allow_write(mddev))
4957 file = kmalloc(sizeof(*file), GFP_NOIO);
4958 else
4959 file = kmalloc(sizeof(*file), GFP_KERNEL);
4964 /* bitmap disabled, zero the first byte and copy out */
4965 if (!mddev->bitmap || !mddev->bitmap->file) {
4966 file->pathname[0] = '\0';
4970 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4974 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4978 strcpy(file->pathname, ptr);
4982 if (copy_to_user(arg, file, sizeof(*file)))
4990 static int get_disk_info(mddev_t * mddev, void __user * arg)
4992 mdu_disk_info_t info;
4995 if (copy_from_user(&info, arg, sizeof(info)))
4998 rdev = find_rdev_nr(mddev, info.number);
5000 info.major = MAJOR(rdev->bdev->bd_dev);
5001 info.minor = MINOR(rdev->bdev->bd_dev);
5002 info.raid_disk = rdev->raid_disk;
5003 info.state = 0;
5004 if (test_bit(Faulty, &rdev->flags))
5005 info.state |= (1<<MD_DISK_FAULTY);
5006 else if (test_bit(In_sync, &rdev->flags)) {
5007 info.state |= (1<<MD_DISK_ACTIVE);
5008 info.state |= (1<<MD_DISK_SYNC);
5010 if (test_bit(WriteMostly, &rdev->flags))
5011 info.state |= (1<<MD_DISK_WRITEMOSTLY);
5012 } else {
5013 info.major = info.minor = 0;
5014 info.raid_disk = -1;
5015 info.state = (1<<MD_DISK_REMOVED);
5018 if (copy_to_user(arg, &info, sizeof(info)))
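/*
 * Illustrative sketch (editorial, not part of md.c): enumerating member
 * devices with GET_DISK_INFO. The caller sets info.number; a slot holding
 * no rdev comes back with state == (1<<MD_DISK_REMOVED), as the fallback
 * branch above shows. MD_SB_DISKS is a natural upper bound for the scan.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>
#include <linux/raid/md_p.h>

int main(void)
{
	int fd = open("/dev/md0", O_RDONLY);
	int i;

	if (fd < 0)
		return 1;
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdu_disk_info_t info = { .number = i };

		if (ioctl(fd, GET_DISK_INFO, &info) < 0)
			break;
		if (info.state & (1 << MD_DISK_REMOVED))
			continue;
		printf("slot %d: dev %d:%d raid_disk %d state 0x%x\n",
		       info.number, info.major, info.minor,
		       info.raid_disk, info.state);
	}
	close(fd);
	return 0;
}
#endif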
5024 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5026 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5028 dev_t dev = MKDEV(info->major,info->minor);
5030 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5033 if (!mddev->raid_disks) {
5035 /* expecting a device which has a superblock */
5036 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5039 "md: md_import_device returned %ld\n",
5041 return PTR_ERR(rdev);
5043 if (!list_empty(&mddev->disks)) {
5044 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
5045 mdk_rdev_t, same_set);
5046 err = super_types[mddev->major_version]
5047 .load_super(rdev, rdev0, mddev->minor_version);
5050 "md: %s has different UUID to %s\n",
5051 bdevname(rdev->bdev,b),
5052 bdevname(rdev0->bdev,b2));
5057 err = bind_rdev_to_array(rdev, mddev);
5064 * add_new_disk can be used once the array is assembled
5065 * to add "hot spares". They must already have a superblock
5070 if (!mddev->pers->hot_add_disk) {
5072 "%s: personality does not support diskops!\n",
5076 if (mddev->persistent)
5077 rdev = md_import_device(dev, mddev->major_version,
5078 mddev->minor_version);
5079 else
5080 rdev = md_import_device(dev, -1, -1);
5083 "md: md_import_device returned %ld\n",
5085 return PTR_ERR(rdev);
5087 /* set save_raid_disk if appropriate */
5088 if (!mddev->persistent) {
5089 if (info->state & (1<<MD_DISK_SYNC) &&
5090 info->raid_disk < mddev->raid_disks)
5091 rdev->raid_disk = info->raid_disk;
5092 else
5093 rdev->raid_disk = -1;
5095 super_types[mddev->major_version].
5096 validate_super(mddev, rdev);
5097 rdev->saved_raid_disk = rdev->raid_disk;
5099 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5100 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5101 set_bit(WriteMostly, &rdev->flags);
5102 else
5103 clear_bit(WriteMostly, &rdev->flags);
5105 rdev->raid_disk = -1;
5106 err = bind_rdev_to_array(rdev, mddev);
5107 if (!err && !mddev->pers->hot_remove_disk) {
5108 /* If there is hot_add_disk but no hot_remove_disk
5109 * then any added disks are for geometry changes,
5110 * and should be added immediately.
5112 super_types[mddev->major_version].
5113 validate_super(mddev, rdev);
5114 err = mddev->pers->hot_add_disk(mddev, rdev);
5116 unbind_rdev_from_array(rdev);
5121 sysfs_notify_dirent(rdev->sysfs_state);
5123 md_update_sb(mddev, 1);
5124 if (mddev->degraded)
5125 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5126 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5127 md_wakeup_thread(mddev->thread);
5131 /* otherwise, add_new_disk is only allowed
5132 * for major_version==0 superblocks
5134 if (mddev->major_version != 0) {
5135 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5140 if (!(info->state & (1<<MD_DISK_FAULTY))) {
5142 rdev = md_import_device(dev, -1, 0);
5145 "md: error, md_import_device() returned %ld\n",
5147 return PTR_ERR(rdev);
5149 rdev->desc_nr = info->number;
5150 if (info->raid_disk < mddev->raid_disks)
5151 rdev->raid_disk = info->raid_disk;
5152 else
5153 rdev->raid_disk = -1;
5155 if (rdev->raid_disk < mddev->raid_disks)
5156 if (info->state & (1<<MD_DISK_SYNC))
5157 set_bit(In_sync, &rdev->flags);
5159 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5160 set_bit(WriteMostly, &rdev->flags);
5162 if (!mddev->persistent) {
5163 printk(KERN_INFO "md: nonpersistent superblock ...\n");
5164 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5165 } else
5166 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5167 rdev->sectors = rdev->sb_start;
5169 err = bind_rdev_to_array(rdev, mddev);
5179 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
5181 char b[BDEVNAME_SIZE];
5184 rdev = find_rdev(mddev, dev);
5188 if (rdev->raid_disk >= 0)
5191 kick_rdev_from_array(rdev);
5192 md_update_sb(mddev, 1);
5193 md_new_event(mddev);
5197 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5198 bdevname(rdev->bdev,b), mdname(mddev));
5202 static int hot_add_disk(mddev_t * mddev, dev_t dev)
5204 char b[BDEVNAME_SIZE];
5211 if (mddev->major_version != 0) {
5212 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5213 " version-0 superblocks.\n",
5217 if (!mddev->pers->hot_add_disk) {
5219 "%s: personality does not support diskops!\n",
5224 rdev = md_import_device(dev, -1, 0);
5227 "md: error, md_import_device() returned %ld\n",
5232 if (mddev->persistent)
5233 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5234 else
5235 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5237 rdev->sectors = rdev->sb_start;
5239 if (test_bit(Faulty, &rdev->flags)) {
5241 "md: can not hot-add faulty %s disk to %s!\n",
5242 bdevname(rdev->bdev,b), mdname(mddev));
5246 clear_bit(In_sync, &rdev->flags);
5248 rdev->saved_raid_disk = -1;
5249 err = bind_rdev_to_array(rdev, mddev);
5254 * The rest had better be atomic: disk failures can be
5255 * noticed in interrupt contexts ...
5258 rdev->raid_disk = -1;
5260 md_update_sb(mddev, 1);
5263 * Kick recovery, maybe this spare has to be added to the
5264 * array immediately.
5266 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5267 md_wakeup_thread(mddev->thread);
5268 md_new_event(mddev);
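/*
 * Illustrative sketch (editorial, not part of md.c): hot-adding a spare with
 * the HOT_ADD_DISK ioctl serviced above (only honoured for version-0.90
 * arrays, as the check at the top of hot_add_disk() enforces). The argument
 * is the device number itself, not a pointer; st_rdev from stat() is what
 * mdadm passes for ordinary major/minor numbers. Paths are assumptions.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/raid/md_u.h>

int main(void)
{
	struct stat st;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || stat("/dev/sdc1", &st) < 0)
		return 1;
	/* HOT_REMOVE_DISK takes the same encoding for an outgoing device */
	if (ioctl(fd, HOT_ADD_DISK, (unsigned long)st.st_rdev) < 0) {
		perror("HOT_ADD_DISK");
		return 1;
	}
	close(fd);
	return 0;
}
#endif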
5276 static int set_bitmap_file(mddev_t *mddev, int fd)
5281 if (!mddev->pers->quiesce)
5283 if (mddev->recovery || mddev->sync_thread)
5285 /* we should be able to change the bitmap.. */
5291 return -EEXIST; /* cannot add when bitmap is present */
5292 mddev->bitmap_info.file = fget(fd);
5294 if (mddev->bitmap_info.file == NULL) {
5295 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5300 err = deny_bitmap_write_access(mddev->bitmap_info.file);
5302 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5304 fput(mddev->bitmap_info.file);
5305 mddev->bitmap_info.file = NULL;
5308 mddev->bitmap_info.offset = 0; /* file overrides offset */
5309 } else if (mddev->bitmap == NULL)
5310 return -ENOENT; /* cannot remove what isn't there */
5313 mddev->pers->quiesce(mddev, 1);
5315 err = bitmap_create(mddev);
5316 if (fd < 0 || err) {
5317 bitmap_destroy(mddev);
5318 fd = -1; /* make sure to put the file */
5320 mddev->pers->quiesce(mddev, 0);
5323 if (mddev->bitmap_info.file) {
5324 restore_bitmap_write_access(mddev->bitmap_info.file);
5325 fput(mddev->bitmap_info.file);
5327 mddev->bitmap_info.file = NULL;
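/*
 * Illustrative sketch (editorial, not part of md.c): attaching a file-backed
 * write-intent bitmap through SET_BITMAP_FILE, which hands set_bitmap_file()
 * an open descriptor (or -1 to drop a file-backed bitmap). The bitmap path
 * is an assumption and must not live on the array itself.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	int md = open("/dev/md0", O_RDONLY);
	int bm = open("/var/lib/md0-bitmap", O_RDWR);

	if (md < 0 || bm < 0)
		return 1;
	if (ioctl(md, SET_BITMAP_FILE, bm) < 0) {
		perror("SET_BITMAP_FILE");
		return 1;
	}
	close(bm);	/* the driver keeps its own reference via fget() */
	close(md);
	return 0;
}
#endif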
5334 * set_array_info is used two different ways
5335 * The original usage is when creating a new array.
5336 * In this usage, raid_disks is > 0 and it together with
5337 * level, size, not_persistent,layout,chunksize determine the
5338 * shape of the array.
5339 * This will always create an array with a type-0.90.0 superblock.
5340 * The newer usage is when assembling an array.
5341 * In this case raid_disks will be 0, and the major_version field is
5342 * used to determine which style super-blocks are to be found on the devices.
5343 * The minor and patch _version numbers are also kept in case the
5344 * super_block handler wishes to interpret them.
5346 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5349 if (info->raid_disks == 0) {
5350 /* just setting version number for superblock loading */
5351 if (info->major_version < 0 ||
5352 info->major_version >= ARRAY_SIZE(super_types) ||
5353 super_types[info->major_version].name == NULL) {
5354 /* maybe try to auto-load a module? */
5356 "md: superblock version %d not known\n",
5357 info->major_version);
5360 mddev->major_version = info->major_version;
5361 mddev->minor_version = info->minor_version;
5362 mddev->patch_version = info->patch_version;
5363 mddev->persistent = !info->not_persistent;
5364 /* ensure mddev_put doesn't delete this now that there
5365 * is some minimal configuration.
5367 mddev->ctime = get_seconds();
5370 mddev->major_version = MD_MAJOR_VERSION;
5371 mddev->minor_version = MD_MINOR_VERSION;
5372 mddev->patch_version = MD_PATCHLEVEL_VERSION;
5373 mddev->ctime = get_seconds();
5375 mddev->level = info->level;
5376 mddev->clevel[0] = 0;
5377 mddev->dev_sectors = 2 * (sector_t)info->size;
5378 mddev->raid_disks = info->raid_disks;
5379 /* don't set md_minor, it is determined by which /dev/md* was
5382 if (info->state & (1<<MD_SB_CLEAN))
5383 mddev->recovery_cp = MaxSector;
5385 mddev->recovery_cp = 0;
5386 mddev->persistent = ! info->not_persistent;
5387 mddev->external = 0;
5389 mddev->layout = info->layout;
5390 mddev->chunk_sectors = info->chunk_size >> 9;
5392 mddev->max_disks = MD_SB_DISKS;
5394 if (mddev->persistent)
5396 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5398 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5399 mddev->bitmap_info.offset = 0;
5401 mddev->reshape_position = MaxSector;
5404 * Generate a 128 bit UUID
5406 get_random_bytes(mddev->uuid, 16);
5408 mddev->new_level = mddev->level;
5409 mddev->new_chunk_sectors = mddev->chunk_sectors;
5410 mddev->new_layout = mddev->layout;
5411 mddev->delta_disks = 0;
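/*
 * Illustrative sketch (editorial, not part of md.c): the "create" usage of
 * SET_ARRAY_INFO described above -- raid_disks > 0 selects a fresh 0.90.0
 * array whose shape comes from level/size/raid_disks/layout/chunk_size.
 * The values shown (RAID1, two disks, 512 MiB per device) are assumptions;
 * ADD_NEW_DISK for each member and RUN_ARRAY would follow.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	info.level = 1;			/* RAID1 */
	info.raid_disks = 2;
	info.size = 524288;		/* per-device data size in KiB */
	if (ioctl(fd, SET_ARRAY_INFO, &info) < 0)
		return 1;
	/* ... ADD_NEW_DISK for each member, then RUN_ARRAY ... */
	close(fd);
	return 0;
}
#endif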
5416 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5418 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5420 if (mddev->external_size)
5423 mddev->array_sectors = array_sectors;
5425 EXPORT_SYMBOL(md_set_array_sectors);
5427 static int update_size(mddev_t *mddev, sector_t num_sectors)
5431 int fit = (num_sectors == 0);
5433 if (mddev->pers->resize == NULL)
5435 /* The "num_sectors" is the number of sectors of each device that
5436 * is used. This can only make sense for arrays with redundancy.
5437 * linear and raid0 always use whatever space is available. We can only
5438 * consider changing this number if no resync or reconstruction is
5439 * happening, and if the new size is acceptable. It must fit before the
5440 * sb_start or, if that is <data_offset, it must fit before the size
5441 * of each device. If num_sectors is zero, we find the largest size
5445 if (mddev->sync_thread)
5446 return -EBUSY;
5447 if (mddev->bitmap)
5448 /* Sorry, cannot grow a bitmap yet, just remove it,
5449 * grow, and re-add.
5450 */
5451 return -EBUSY;
5452 list_for_each_entry(rdev, &mddev->disks, same_set) {
5453 sector_t avail = rdev->sectors;
5455 if (fit && (num_sectors == 0 || num_sectors > avail))
5456 num_sectors = avail;
5457 if (avail < num_sectors)
5460 rv = mddev->pers->resize(mddev, num_sectors);
5462 revalidate_disk(mddev->gendisk);
5466 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5469 /* change the number of raid disks */
5470 if (mddev->pers->check_reshape == NULL)
5472 if (raid_disks <= 0 ||
5473 (mddev->max_disks && raid_disks >= mddev->max_disks))
5475 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5477 mddev->delta_disks = raid_disks - mddev->raid_disks;
5479 rv = mddev->pers->check_reshape(mddev);
5485 * update_array_info is used to change the configuration of an
5487 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
5488 * fields in the info are checked against the array.
5489 * Any differences that cannot be handled will cause an error.
5490 * Normally, only one change can be managed at a time.
5492 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5498 /* calculate expected state, ignoring low bits */
5499 if (mddev->bitmap && mddev->bitmap_info.offset)
5500 state |= (1 << MD_SB_BITMAP_PRESENT);
5502 if (mddev->major_version != info->major_version ||
5503 mddev->minor_version != info->minor_version ||
5504 /* mddev->patch_version != info->patch_version || */
5505 mddev->ctime != info->ctime ||
5506 mddev->level != info->level ||
5507 /* mddev->layout != info->layout || */
5508 !mddev->persistent != info->not_persistent||
5509 mddev->chunk_sectors != info->chunk_size >> 9 ||
5510 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5511 ((state^info->state) & 0xfffffe00)
5514 /* Check there is only one change */
5515 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5517 if (mddev->raid_disks != info->raid_disks)
5519 if (mddev->layout != info->layout)
5521 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5528 if (mddev->layout != info->layout) {
5530 * we don't need to do anything at the md level, the
5531 * personality will take care of it all.
5533 if (mddev->pers->check_reshape == NULL)
5536 mddev->new_layout = info->layout;
5537 rv = mddev->pers->check_reshape(mddev);
5539 mddev->new_layout = mddev->layout;
5543 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5544 rv = update_size(mddev, (sector_t)info->size * 2);
5546 if (mddev->raid_disks != info->raid_disks)
5547 rv = update_raid_disks(mddev, info->raid_disks);
5549 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5550 if (mddev->pers->quiesce == NULL)
5552 if (mddev->recovery || mddev->sync_thread)
5554 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5555 /* add the bitmap */
5558 if (mddev->bitmap_info.default_offset == 0)
5560 mddev->bitmap_info.offset =
5561 mddev->bitmap_info.default_offset;
5562 mddev->pers->quiesce(mddev, 1);
5563 rv = bitmap_create(mddev);
5565 bitmap_destroy(mddev);
5566 mddev->pers->quiesce(mddev, 0);
5568 /* remove the bitmap */
5571 if (mddev->bitmap->file)
5573 mddev->pers->quiesce(mddev, 1);
5574 bitmap_destroy(mddev);
5575 mddev->pers->quiesce(mddev, 0);
5576 mddev->bitmap_info.offset = 0;
5579 md_update_sb(mddev, 1);
5583 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5587 if (mddev->pers == NULL)
5590 rdev = find_rdev(mddev, dev);
5594 md_error(mddev, rdev);
5599 * We have a problem here: there is no easy way to give a CHS
5600 * virtual geometry. We currently pretend that we have 2 heads and
5601 * 4 sectors (with a BIG number of cylinders...). This drives
5602 * dosfs just mad... ;-)
5604 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5606 mddev_t *mddev = bdev->bd_disk->private_data;
5610 geo->cylinders = mddev->array_sectors / 8;
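/*
 * Illustrative sketch (editorial, not part of md.c): the fake 2-head,
 * 4-sector geometry above means cylinders == capacity/8 sectors; userspace
 * sees it through the standard HDIO_GETGEO ioctl. /dev/md0 is an assumption.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	printf("heads=%u sectors=%u cylinders=%u\n",
	       geo.heads, geo.sectors, geo.cylinders);
	close(fd);
	return 0;
}
#endif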
5614 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5615 unsigned int cmd, unsigned long arg)
5618 void __user *argp = (void __user *)arg;
5619 mddev_t *mddev = NULL;
5622 if (!capable(CAP_SYS_ADMIN))
5626 * Commands dealing with the RAID driver but not any
5632 err = get_version(argp);
5635 case PRINT_RAID_DEBUG:
5643 autostart_arrays(arg);
5650 * Commands creating/starting a new array:
5653 mddev = bdev->bd_disk->private_data;
5660 err = mddev_lock(mddev);
5663 "md: ioctl lock interrupted, reason %d, cmd %d\n",
5670 case SET_ARRAY_INFO:
5672 mdu_array_info_t info;
5673 if (!arg)
5674 memset(&info, 0, sizeof(info));
5675 else if (copy_from_user(&info, argp, sizeof(info))) {
5680 err = update_array_info(mddev, &info);
5682 printk(KERN_WARNING "md: couldn't update"
5683 " array info. %d\n", err);
5688 if (!list_empty(&mddev->disks)) {
5690 "md: array %s already has disks!\n",
5695 if (mddev->raid_disks) {
5697 "md: array %s already initialised!\n",
5702 err = set_array_info(mddev, &info);
5704 printk(KERN_WARNING "md: couldn't set"
5705 " array info. %d\n", err);
5715 * Commands querying/configuring an existing array:
5717 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5718 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5719 if ((!mddev->raid_disks && !mddev->external)
5720 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5721 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5722 && cmd != GET_BITMAP_FILE) {
5728 * Commands even a read-only array can execute:
5732 case GET_ARRAY_INFO:
5733 err = get_array_info(mddev, argp);
5736 case GET_BITMAP_FILE:
5737 err = get_bitmap_file(mddev, argp);
5741 err = get_disk_info(mddev, argp);
5744 case RESTART_ARRAY_RW:
5745 err = restart_array(mddev);
5749 err = do_md_stop(mddev, 0, 1);
5753 err = md_set_readonly(mddev, 1);
5754 goto done;
5756 case BLKROSET:
5757 if (get_user(ro, (int __user *)(arg))) {
5763 /* if the bdev is going readonly the value of mddev->ro
5764 * does not matter, no writes are coming
5769 /* are we already prepared for writes? */
5773 /* transitioning to readauto need only happen for
5774 * arrays that call md_write_start
5777 err = restart_array(mddev);
5780 set_disk_ro(mddev->gendisk, 0);
5787 * The remaining ioctls are changing the state of the
5788 * superblock, so we do not allow them on read-only arrays.
5789 * However non-MD ioctls (e.g. get-size) will still come through
5790 * here and hit the 'default' below, so only disallow
5791 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5793 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5794 if (mddev->ro == 2) {
5796 sysfs_notify_dirent(mddev->sysfs_state);
5797 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5798 md_wakeup_thread(mddev->thread);
5809 mdu_disk_info_t info;
5810 if (copy_from_user(&info, argp, sizeof(info)))
5813 err = add_new_disk(mddev, &info);
5817 case HOT_REMOVE_DISK:
5818 err = hot_remove_disk(mddev, new_decode_dev(arg));
5822 err = hot_add_disk(mddev, new_decode_dev(arg));
5825 case SET_DISK_FAULTY:
5826 err = set_disk_faulty(mddev, new_decode_dev(arg));
5830 err = do_md_run(mddev);
5833 case SET_BITMAP_FILE:
5834 err = set_bitmap_file(mddev, (int)arg);
5844 if (mddev->hold_active == UNTIL_IOCTL &&
5846 mddev->hold_active = 0;
5847 mddev_unlock(mddev);
5856 #ifdef CONFIG_COMPAT
5857 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
5858 unsigned int cmd, unsigned long arg)
5861 case HOT_REMOVE_DISK:
5863 case SET_DISK_FAULTY:
5864 case SET_BITMAP_FILE:
5865 /* These take in integer arg, do not convert */
5868 arg = (unsigned long)compat_ptr(arg);
5872 return md_ioctl(bdev, mode, cmd, arg);
5874 #endif /* CONFIG_COMPAT */
5876 static int md_open(struct block_device *bdev, fmode_t mode)
5879 * Succeed if we can lock the mddev, which confirms that
5880 * it isn't being stopped right now.
5882 mddev_t *mddev = mddev_find(bdev->bd_dev);
5885 if (mddev->gendisk != bdev->bd_disk) {
5886 /* we are racing with mddev_put which is discarding this
5887 * bd_disk.
5888 */
5889 mddev_put(mddev);
5890 /* Wait until bdev->bd_disk is definitely gone */
5891 flush_scheduled_work();
5892 /* Then retry the open from the top */
5893 return -ERESTARTSYS;
5895 BUG_ON(mddev != bdev->bd_disk->private_data);
5897 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5901 atomic_inc(&mddev->openers);
5902 mutex_unlock(&mddev->open_mutex);
5908 static int md_release(struct gendisk *disk, fmode_t mode)
5910 mddev_t *mddev = disk->private_data;
5913 atomic_dec(&mddev->openers);
5918 static const struct block_device_operations md_fops =
5920 .owner = THIS_MODULE,
5922 .release = md_release,
5924 #ifdef CONFIG_COMPAT
5925 .compat_ioctl = md_compat_ioctl,
5927 .getgeo = md_getgeo,
5930 static int md_thread(void * arg)
5932 mdk_thread_t *thread = arg;
5935 * md_thread is a 'system-thread', its priority should be very
5936 * high. We avoid resource deadlocks individually in each
5937 * raid personality. (RAID5 does preallocation) We also use RR and
5938 * the very same RT priority as kswapd, thus we will never get
5939 * into a priority inversion deadlock.
5941 * we definitely have to have equal or higher priority than
5942 * bdflush, otherwise bdflush will deadlock if there are too
5943 * many dirty RAID5 blocks.
5946 allow_signal(SIGKILL);
5947 while (!kthread_should_stop()) {
5949 /* We need to wait INTERRUPTIBLE so that
5950 * we don't add to the load-average.
5951 * That means we need to be sure no signals are
5952 * pending
5953 */
5954 if (signal_pending(current))
5955 flush_signals(current);
5957 wait_event_interruptible_timeout
5958 (thread->wqueue,
5959 test_bit(THREAD_WAKEUP, &thread->flags)
5960 || kthread_should_stop(),
5961 thread->timeout);
5963 clear_bit(THREAD_WAKEUP, &thread->flags);
5965 thread->run(thread->mddev);
5971 void md_wakeup_thread(mdk_thread_t *thread)
5974 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5975 set_bit(THREAD_WAKEUP, &thread->flags);
5976 wake_up(&thread->wqueue);
5980 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5983 mdk_thread_t *thread;
5985 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5989 init_waitqueue_head(&thread->wqueue);
5992 thread->mddev = mddev;
5993 thread->timeout = MAX_SCHEDULE_TIMEOUT;
5994 thread->tsk = kthread_run(md_thread, thread,
5996 mdname(thread->mddev),
5997 name ?: mddev->pers->name);
5998 if (IS_ERR(thread->tsk)) {
6005 void md_unregister_thread(mdk_thread_t *thread)
6009 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6011 kthread_stop(thread->tsk);
6015 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
6022 if (!rdev || test_bit(Faulty, &rdev->flags))
6025 if (mddev->external)
6026 set_bit(Blocked, &rdev->flags);
6028 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
6030 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
6031 __builtin_return_address(0),__builtin_return_address(1),
6032 __builtin_return_address(2),__builtin_return_address(3));
6036 if (!mddev->pers->error_handler)
6038 mddev->pers->error_handler(mddev,rdev);
6039 if (mddev->degraded)
6040 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6041 sysfs_notify_dirent(rdev->sysfs_state);
6042 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6043 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6044 md_wakeup_thread(mddev->thread);
6045 md_new_event_inintr(mddev);
6048 /* seq_file implementation /proc/mdstat */
6050 static void status_unused(struct seq_file *seq)
6055 seq_printf(seq, "unused devices: ");
6057 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6058 char b[BDEVNAME_SIZE];
6060 seq_printf(seq, "%s ",
6061 bdevname(rdev->bdev,b));
6064 seq_printf(seq, "<none>");
6066 seq_printf(seq, "\n");
6070 static void status_resync(struct seq_file *seq, mddev_t * mddev)
6072 sector_t max_sectors, resync, res;
6073 unsigned long dt, db;
6076 unsigned int per_milli;
6078 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
6080 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6081 max_sectors = mddev->resync_max_sectors;
6083 max_sectors = mddev->dev_sectors;
6086 * Should not happen.
6092 /* Pick 'scale' such that (resync>>scale)*1000 will fit
6093 * in a sector_t, and (max_sectors>>scale) will fit in a
6094 * u32, as those are the requirements for sector_div.
6095 * Thus 'scale' must be at least 10
6098 if (sizeof(sector_t) > sizeof(unsigned long)) {
6099 while ( max_sectors/2 > (1ULL<<(scale+32)))
6102 res = (resync>>scale)*1000;
6103 sector_div(res, (u32)((max_sectors>>scale)+1));
6107 int i, x = per_milli/50, y = 20-x;
6108 seq_printf(seq, "[");
6109 for (i = 0; i < x; i++)
6110 seq_printf(seq, "=");
6111 seq_printf(seq, ">");
6112 for (i = 0; i < y; i++)
6113 seq_printf(seq, ".");
6114 seq_printf(seq, "] ");
6116 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6117 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6118 "reshape" :
6119 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6120 "check" :
6121 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6122 "resync" : "recovery"))),
6123 per_milli/10, per_milli % 10,
6124 (unsigned long long) resync/2,
6125 (unsigned long long) max_sectors/2);
6128 * dt: time from mark until now
6129 * db: blocks written from mark until now
6130 * rt: remaining time
6132 * rt is a sector_t, so could be 32bit or 64bit.
6133 * So we divide before multiply in case it is 32bit and close
6134 * to the limit of sector_t.
6135 * We scale the divisor (db) by 32 to avoid losing precision
6136 * near the end of resync when the number of remaining sectors
6137 * is close to the 'db' value.
6138 * We then divide rt by 32 after multiplying by dt to compensate.
6139 * The '+1' avoids division by zero if db is very small.
6141 dt = ((jiffies - mddev->resync_mark) / HZ);
6142 if (!dt) dt++;
6143 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
6144 - mddev->resync_mark_cnt;
6146 rt = max_sectors - resync; /* number of remaining sectors */
6147 sector_div(rt, db/32+1);
6148 rt *= dt;
6149 rt >>= 5;
6151 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
6152 ((unsigned long)rt % 60)/6);
6154 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
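/*
 * Illustrative sketch (editorial, not part of md.c): the fixed-point
 * arithmetic used above, in plain C. per_milli is tenths of a percent,
 * computed after shifting both counters down by 'scale'; the ETA is
 * remaining/(db/32+1)*dt>>5, i.e. roughly remaining*dt/db. Sample numbers
 * are invented.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_sectors = 4000000000ULL;	/* array size, sectors */
	uint64_t resync = 1000000000ULL;	/* sectors done so far */
	uint64_t dt = 30, db = 3000000;		/* secs / sectors since mark */
	int scale = 10;
	uint64_t res, rt;
	unsigned int per_milli;

	while (max_sectors / 2 > (1ULL << (scale + 32)))
		scale++;			/* keep sector_div operands in range */
	res = (resync >> scale) * 1000;
	res /= (uint32_t)((max_sectors >> scale) + 1);
	per_milli = res;

	rt = max_sectors - resync;		/* remaining sectors */
	rt /= db / 32 + 1;
	rt *= dt;
	rt >>= 5;				/* undo the *32 scaling of db */

	printf("%u.%u%% done, finish=%llu.%llumin\n",
	       per_milli / 10, per_milli % 10,
	       (unsigned long long)rt / 60,
	       ((unsigned long long)rt % 60) / 6);
	return 0;
}
#endif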
6157 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
6159 struct list_head *tmp;
6169 spin_lock(&all_mddevs_lock);
6170 list_for_each(tmp,&all_mddevs)
6172 mddev = list_entry(tmp, mddev_t, all_mddevs);
6174 spin_unlock(&all_mddevs_lock);
6177 spin_unlock(&all_mddevs_lock);
6179 return (void*)2;/* tail */
6183 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
6185 struct list_head *tmp;
6186 mddev_t *next_mddev, *mddev = v;
6192 spin_lock(&all_mddevs_lock);
6194 tmp = all_mddevs.next;
6196 tmp = mddev->all_mddevs.next;
6197 if (tmp != &all_mddevs)
6198 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
6200 next_mddev = (void*)2;
6203 spin_unlock(&all_mddevs_lock);
6211 static void md_seq_stop(struct seq_file *seq, void *v)
6215 if (mddev && v != (void*)1 && v != (void*)2)
6219 struct mdstat_info {
6220 int event;
6221 };
6223 static int md_seq_show(struct seq_file *seq, void *v)
6228 struct mdstat_info *mi = seq->private;
6229 struct bitmap *bitmap;
6231 if (v == (void*)1) {
6232 struct mdk_personality *pers;
6233 seq_printf(seq, "Personalities : ");
6234 spin_lock(&pers_lock);
6235 list_for_each_entry(pers, &pers_list, list)
6236 seq_printf(seq, "[%s] ", pers->name);
6238 spin_unlock(&pers_lock);
6239 seq_printf(seq, "\n");
6240 mi->event = atomic_read(&md_event_count);
6243 if (v == (void*)2) {
6248 if (mddev_lock(mddev) < 0)
6251 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6252 seq_printf(seq, "%s : %sactive", mdname(mddev),
6253 mddev->pers ? "" : "in");
6256 seq_printf(seq, " (read-only)");
6258 seq_printf(seq, " (auto-read-only)");
6259 seq_printf(seq, " %s", mddev->pers->name);
6263 list_for_each_entry(rdev, &mddev->disks, same_set) {
6264 char b[BDEVNAME_SIZE];
6265 seq_printf(seq, " %s[%d]",
6266 bdevname(rdev->bdev,b), rdev->desc_nr);
6267 if (test_bit(WriteMostly, &rdev->flags))
6268 seq_printf(seq, "(W)");
6269 if (test_bit(Faulty, &rdev->flags)) {
6270 seq_printf(seq, "(F)");
6272 } else if (rdev->raid_disk < 0)
6273 seq_printf(seq, "(S)"); /* spare */
6274 sectors += rdev->sectors;
6277 if (!list_empty(&mddev->disks)) {
6278 if (mddev->array_sectors)
6279 seq_printf(seq, "\n %llu blocks",
6280 (unsigned long long)
6281 mddev->array_sectors / 2);
6282 else
6283 seq_printf(seq, "\n %llu blocks",
6284 (unsigned long long)sectors / 2);
6286 if (mddev->persistent) {
6287 if (mddev->major_version != 0 ||
6288 mddev->minor_version != 90) {
6289 seq_printf(seq," super %d.%d",
6290 mddev->major_version,
6291 mddev->minor_version);
6293 } else if (mddev->external)
6294 seq_printf(seq, " super external:%s",
6295 mddev->metadata_type);
6297 seq_printf(seq, " super non-persistent");
6300 mddev->pers->status(seq, mddev);
6301 seq_printf(seq, "\n ");
6302 if (mddev->pers->sync_request) {
6303 if (mddev->curr_resync > 2) {
6304 status_resync(seq, mddev);
6305 seq_printf(seq, "\n ");
6306 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
6307 seq_printf(seq, "\tresync=DELAYED\n ");
6308 else if (mddev->recovery_cp < MaxSector)
6309 seq_printf(seq, "\tresync=PENDING\n ");
6312 seq_printf(seq, "\n ");
6314 if ((bitmap = mddev->bitmap)) {
6315 unsigned long chunk_kb;
6316 unsigned long flags;
6317 spin_lock_irqsave(&bitmap->lock, flags);
6318 chunk_kb = mddev->bitmap_info.chunksize >> 10;
6319 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
6321 bitmap->pages - bitmap->missing_pages,
6323 (bitmap->pages - bitmap->missing_pages)
6324 << (PAGE_SHIFT - 10),
6325 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
6326 chunk_kb ? "KB" : "B");
6328 seq_printf(seq, ", file: ");
6329 seq_path(seq, &bitmap->file->f_path, " \t\n");
6332 seq_printf(seq, "\n");
6333 spin_unlock_irqrestore(&bitmap->lock, flags);
6336 seq_printf(seq, "\n");
6338 mddev_unlock(mddev);
6343 static const struct seq_operations md_seq_ops = {
6344 .start = md_seq_start,
6345 .next = md_seq_next,
6346 .stop = md_seq_stop,
6347 .show = md_seq_show,
6350 static int md_seq_open(struct inode *inode, struct file *file)
6353 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6357 error = seq_open(file, &md_seq_ops);
6361 struct seq_file *p = file->private_data;
6363 mi->event = atomic_read(&md_event_count);
6368 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6370 struct seq_file *m = filp->private_data;
6371 struct mdstat_info *mi = m->private;
6374 poll_wait(filp, &md_event_waiters, wait);
6376 /* always allow read */
6377 mask = POLLIN | POLLRDNORM;
6379 if (mi->event != atomic_read(&md_event_count))
6380 mask |= POLLERR | POLLPRI;
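/*
 * Illustrative sketch (editorial, not part of md.c): waiting for array
 * events from userspace. mdstat_poll() above always reports the file
 * readable and raises POLLERR|POLLPRI once md_event_count has moved on,
 * so a monitor reads /proc/mdstat once and then polls for the priority
 * bits before re-reading.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	char buf[4096];
	int fd = open("/proc/mdstat", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (fd < 0)
		return 1;
	read(fd, buf, sizeof(buf));		/* arm: consume current state */
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & (POLLERR | POLLPRI)) {
			ssize_t n;

			lseek(fd, 0, SEEK_SET);	/* re-read refreshes mi->event */
			n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				fputs(buf, stdout);	/* array state changed */
			}
		}
	}
	close(fd);
	return 0;
}
#endif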
6384 static const struct file_operations md_seq_fops = {
6385 .owner = THIS_MODULE,
6386 .open = md_seq_open,
6388 .llseek = seq_lseek,
6389 .release = seq_release_private,
6390 .poll = mdstat_poll,
6393 int register_md_personality(struct mdk_personality *p)
6395 spin_lock(&pers_lock);
6396 list_add_tail(&p->list, &pers_list);
6397 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6398 spin_unlock(&pers_lock);
6402 int unregister_md_personality(struct mdk_personality *p)
6404 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6405 spin_lock(&pers_lock);
6406 list_del_init(&p->list);
6407 spin_unlock(&pers_lock);
6411 static int is_mddev_idle(mddev_t *mddev, int init)
6419 rdev_for_each_rcu(rdev, mddev) {
6420 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6421 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6422 (int)part_stat_read(&disk->part0, sectors[1]) -
6423 atomic_read(&disk->sync_io);
6424 /* sync IO will cause sync_io to increase before the disk_stats
6425 * as sync_io is counted when a request starts, and
6426 * disk_stats is counted when it completes.
6427 * So resync activity will cause curr_events to be smaller than
6428 * when there was no such activity.
6429 * non-sync IO will cause disk_stat to increase without
6430 * increasing sync_io so curr_events will (eventually)
6431 * be larger than it was before. Once it becomes
6432 * substantially larger, the test below will cause
6433 * the array to appear non-idle, and resync will slow
6434 * down.
6435 * If there is a lot of outstanding resync activity when
6436 * we set last_event to curr_events, then all that activity
6437 * completing might cause the array to appear non-idle
6438 * and resync will be slowed down even though there might
6439 * not have been non-resync activity. This will only
6440 * happen once though. 'last_events' will soon reflect
6441 * the state where there is little or no outstanding
6442 * resync requests, and further resync activity will
6443 * always make curr_events less than last_events.
6446 if (init || curr_events - rdev->last_events > 64) {
6447 rdev->last_events = curr_events;
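/*
 * Illustrative sketch (editorial, not part of md.c): the hysteresis
 * described above, reduced to one counter pair. Only when the non-resync
 * I/O delta since the last snapshot exceeds 64 sectors does the disk count
 * as busy; the snapshot then moves forward so one burst is charged once.
 * The sample deltas are invented.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>

static int last_events;

static int disk_is_idle(int total_sectors, int sync_sectors)
{
	int curr_events = total_sectors - sync_sectors;

	if (curr_events - last_events > 64) {
		last_events = curr_events;
		return 0;		/* real (non-resync) I/O seen */
	}
	return 1;
}

int main(void)
{
	/* resync-only progress: totals grow but the difference stays put */
	printf("%d\n", disk_is_idle(1000, 1000));	/* 1: idle */
	/* a burst of normal writes shifts the difference past the slack */
	printf("%d\n", disk_is_idle(2200, 2000));	/* 0: busy */
	printf("%d\n", disk_is_idle(2300, 2100));	/* 1: idle again */
	return 0;
}
#endif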
6455 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6457 /* another "blocks" (512byte) blocks have been synced */
6458 atomic_sub(blocks, &mddev->recovery_active);
6459 wake_up(&mddev->recovery_wait);
6461 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6462 md_wakeup_thread(mddev->thread);
6463 // stop recovery, signal do_sync ....
6468 /* md_write_start(mddev, bi)
6469 * If we need to update some array metadata (e.g. 'active' flag
6470 * in superblock) before writing, schedule a superblock update
6471 * and wait for it to complete.
6473 void md_write_start(mddev_t *mddev, struct bio *bi)
6476 if (bio_data_dir(bi) != WRITE)
6479 BUG_ON(mddev->ro == 1);
6480 if (mddev->ro == 2) {
6481 /* need to switch to read/write */
6483 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6484 md_wakeup_thread(mddev->thread);
6485 md_wakeup_thread(mddev->sync_thread);
6488 atomic_inc(&mddev->writes_pending);
6489 if (mddev->safemode == 1)
6490 mddev->safemode = 0;
6491 if (mddev->in_sync) {
6492 spin_lock_irq(&mddev->write_lock);
6493 if (mddev->in_sync) {
6495 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6496 md_wakeup_thread(mddev->thread);
6499 spin_unlock_irq(&mddev->write_lock);
6502 sysfs_notify_dirent(mddev->sysfs_state);
6503 wait_event(mddev->sb_wait,
6504 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6505 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6508 void md_write_end(mddev_t *mddev)
6510 if (atomic_dec_and_test(&mddev->writes_pending)) {
6511 if (mddev->safemode == 2)
6512 md_wakeup_thread(mddev->thread);
6513 else if (mddev->safemode_delay)
6514 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
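/*
 * Illustrative sketch (editorial, not part of md.c): every array write is
 * bracketed by the two helpers above -- md_write_start() before issuing the
 * I/O (it may block while the superblock is marked 'active'), md_write_end()
 * once the write has finished (real personalities call it from their
 * completion path). The function below is a placeholder, not a real
 * personality.
 */
#if 0 /* in-kernel usage sketch, excluded from the build */
static void example_handle_write(mddev_t *mddev, struct bio *bio)
{
	md_write_start(mddev, bio);	/* waits until the array is 'active' */
	/* ... map and submit the write to the member devices ... */
	/* later, from the completion handler of the last sub-write: */
	md_write_end(mddev);		/* may arm the safemode timer */
}
#endif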
6518 /* md_allow_write(mddev)
6519 * Calling this ensures that the array is marked 'active' so that writes
6520 * may proceed without blocking. It is important to call this before
6521 * attempting a GFP_KERNEL allocation while holding the mddev lock.
6522 * Must be called with mddev_lock held.
6524 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
6525 * is dropped, so return -EAGAIN after notifying userspace.
6527 int md_allow_write(mddev_t *mddev)
6533 if (!mddev->pers->sync_request)
6536 spin_lock_irq(&mddev->write_lock);
6537 if (mddev->in_sync) {
6539 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6540 if (mddev->safemode_delay &&
6541 mddev->safemode == 0)
6542 mddev->safemode = 1;
6543 spin_unlock_irq(&mddev->write_lock);
6544 md_update_sb(mddev, 0);
6545 sysfs_notify_dirent(mddev->sysfs_state);
6547 spin_unlock_irq(&mddev->write_lock);
6549 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6554 EXPORT_SYMBOL_GPL(md_allow_write);
6556 #define SYNC_MARKS 10
6557 #define SYNC_MARK_STEP (3*HZ)
6558 void md_do_sync(mddev_t *mddev)
6561 unsigned int currspeed = 0,
6563 sector_t max_sectors,j, io_sectors;
6564 unsigned long mark[SYNC_MARKS];
6565 sector_t mark_cnt[SYNC_MARKS];
6567 struct list_head *tmp;
6568 sector_t last_check;
6573 /* just in case the thread restarts... */
6574 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6576 if (mddev->ro) /* never try to sync a read-only array */
6579 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6580 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6581 desc = "data-check";
6582 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6583 desc = "requested-resync";
6584 else
6585 desc = "resync";
6586 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6587 desc = "reshape";
6588 else
6589 desc = "recovery";
6591 /* we overload curr_resync somewhat here.
6592 * 0 == not engaged in resync at all
6593 * 2 == checking that there is no conflict with another sync
6594 * 1 == like 2, but have yielded to allow conflicting resync to
6595 * commence
6596 * other == active in resync - this many blocks
6598 * Before starting a resync we must have set curr_resync to
6599 * 2, and then checked that every "conflicting" array has curr_resync
6600 * less than ours. When we find one that is the same or higher
6601 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
6602 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6603 * This will mean we have to start checking from the beginning again.
6608 mddev->curr_resync = 2;
6611 if (kthread_should_stop())
6612 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6614 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6616 for_each_mddev(mddev2, tmp) {
6617 if (mddev2 == mddev)
6619 if (!mddev->parallel_resync
6620 && mddev2->curr_resync
6621 && match_mddev_units(mddev, mddev2)) {
6623 if (mddev < mddev2 && mddev->curr_resync == 2) {
6624 /* arbitrarily yield */
6625 mddev->curr_resync = 1;
6626 wake_up(&resync_wait);
6628 if (mddev > mddev2 && mddev->curr_resync == 1)
6629 /* no need to wait here, we can wait the next
6630 * time 'round when curr_resync == 2
6633 /* We need to wait 'interruptible' so as not to
6634 * contribute to the load average, and not to
6635 * be caught by 'softlockup'
6637 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6638 if (!kthread_should_stop() &&
6639 mddev2->curr_resync >= mddev->curr_resync) {
6640 printk(KERN_INFO "md: delaying %s of %s"
6641 " until %s has finished (they"
6642 " share one or more physical units)\n",
6643 desc, mdname(mddev), mdname(mddev2));
6645 if (signal_pending(current))
6646 flush_signals(current);
6648 finish_wait(&resync_wait, &wq);
6651 finish_wait(&resync_wait, &wq);
6654 } while (mddev->curr_resync < 2);
6657 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6658 /* resync follows the size requested by the personality,
6659 * which defaults to physical size, but can be virtual size
6661 max_sectors = mddev->resync_max_sectors;
6662 mddev->resync_mismatches = 0;
6663 /* we don't use the checkpoint if there's a bitmap */
6664 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6665 j = mddev->resync_min;
6666 else if (!mddev->bitmap)
6667 j = mddev->recovery_cp;
6669 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6670 max_sectors = mddev->dev_sectors;
6672 /* recovery follows the physical size of devices */
6673 max_sectors = mddev->dev_sectors;
6676 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6677 if (rdev->raid_disk >= 0 &&
6678 !test_bit(Faulty, &rdev->flags) &&
6679 !test_bit(In_sync, &rdev->flags) &&
6680 rdev->recovery_offset < j)
6681 j = rdev->recovery_offset;
6685 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6686 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
6687 " %d KB/sec/disk.\n", speed_min(mddev));
6688 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6689 "(but not more than %d KB/sec) for %s.\n",
6690 speed_max(mddev), desc);
6692 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6695 for (m = 0; m < SYNC_MARKS; m++) {
6697 mark_cnt[m] = io_sectors;
6700 mddev->resync_mark = mark[last_mark];
6701 mddev->resync_mark_cnt = mark_cnt[last_mark];
6704 * Tune reconstruction:
6706 window = 32*(PAGE_SIZE/512);
6707 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6708 window/2,(unsigned long long) max_sectors/2);
6710 atomic_set(&mddev->recovery_active, 0);
6715 "md: resuming %s of %s from checkpoint.\n",
6716 desc, mdname(mddev));
6717 mddev->curr_resync = j;
6719 mddev->curr_resync_completed = mddev->curr_resync;
6721 while (j < max_sectors) {
6726 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6727 ((mddev->curr_resync > mddev->curr_resync_completed &&
6728 (mddev->curr_resync - mddev->curr_resync_completed)
6729 > (max_sectors >> 4)) ||
6730 (j - mddev->curr_resync_completed)*2
6731 >= mddev->resync_max - mddev->curr_resync_completed
6733 /* time to update curr_resync_completed */
6734 blk_unplug(mddev->queue);
6735 wait_event(mddev->recovery_wait,
6736 atomic_read(&mddev->recovery_active) == 0);
6737 mddev->curr_resync_completed =
6739 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6740 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6743 while (j >= mddev->resync_max && !kthread_should_stop()) {
6744 /* As this condition is controlled by user-space,
6745 * we can block indefinitely, so use '_interruptible'
6746 * to avoid triggering warnings.
6748 flush_signals(current); /* just in case */
6749 wait_event_interruptible(mddev->recovery_wait,
6750 mddev->resync_max > j
6751 || kthread_should_stop());
6754 if (kthread_should_stop())
6757 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6758 currspeed < speed_min(mddev));
6760 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6764 if (!skipped) { /* actual IO requested */
6765 io_sectors += sectors;
6766 atomic_add(sectors, &mddev->recovery_active);
6770 if (j>1) mddev->curr_resync = j;
6771 mddev->curr_mark_cnt = io_sectors;
6772 if (last_check == 0)
6773 /* this is the earliest that rebuild will be
6774 * visible in /proc/mdstat
6775 */
6776 md_new_event(mddev);
6778 if (last_check + window > io_sectors || j == max_sectors)
6781 last_check = io_sectors;
6783 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6787 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6789 int next = (last_mark+1) % SYNC_MARKS;
6791 mddev->resync_mark = mark[next];
6792 mddev->resync_mark_cnt = mark_cnt[next];
6793 mark[next] = jiffies;
6794 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6799 if (kthread_should_stop())
6804 * this loop exits only when either we are slower than
6805 * the 'hard' speed limit, or the system was IO-idle for
6806 * a jiffy.
6807 * the system might be non-idle CPU-wise, but we only care
6808 * about not overloading the IO subsystem. (things like an
6809 * e2fsck being done on the RAID array should execute fast)
6811 blk_unplug(mddev->queue);
6814 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6815 /((jiffies-mddev->resync_mark)/HZ +1) +1;
6817 if (currspeed > speed_min(mddev)) {
6818 if ((currspeed > speed_max(mddev)) ||
6819 !is_mddev_idle(mddev, 0)) {
6825 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6827 * this also signals 'finished resyncing' to md_stop
6830 blk_unplug(mddev->queue);
6832 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6834 /* tell personality that we are finished */
6835 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6837 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6838 mddev->curr_resync > 2) {
6839 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6840 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6841 if (mddev->curr_resync >= mddev->recovery_cp) {
6843 "md: checkpointing %s of %s.\n",
6844 desc, mdname(mddev));
6845 mddev->recovery_cp = mddev->curr_resync;
6848 mddev->recovery_cp = MaxSector;
6850 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6851 mddev->curr_resync = MaxSector;
6853 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6854 if (rdev->raid_disk >= 0 &&
6855 !test_bit(Faulty, &rdev->flags) &&
6856 !test_bit(In_sync, &rdev->flags) &&
6857 rdev->recovery_offset < mddev->curr_resync)
6858 rdev->recovery_offset = mddev->curr_resync;
6862 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6865 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6866 /* We completed so min/max setting can be forgotten if used. */
6867 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6868 mddev->resync_min = 0;
6869 mddev->resync_max = MaxSector;
6870 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6871 mddev->resync_min = mddev->curr_resync_completed;
6872 mddev->curr_resync = 0;
6873 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6874 mddev->curr_resync_completed = 0;
6875 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6876 wake_up(&resync_wait);
6877 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6878 md_wakeup_thread(mddev->thread);
6883 * got a signal, exit.
6886 "md: md_do_sync() got signal ... exiting\n");
6887 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6891 EXPORT_SYMBOL_GPL(md_do_sync);
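/*
 * Illustrative sketch (editorial, not part of md.c): the currspeed
 * computation used by the throttle in md_do_sync(), in plain C. Sectors
 * are halved into KiB, and both the elapsed time and the result get a '+1'
 * so a fresh mark can never divide by zero. Sample numbers are invented.
 */
#if 0 /* standalone userspace example, excluded from the kernel build */
#include <stdio.h>

int main(void)
{
	unsigned long io_sectors = 8000000;	/* synced since start */
	unsigned long mark_cnt = 2000000;	/* io_sectors at last mark */
	unsigned long elapsed_sec = 20;		/* (jiffies-resync_mark)/HZ */
	int speed_min = 1000, speed_max = 200000;	/* KB/sec limits */
	int currspeed;

	currspeed = (io_sectors - mark_cnt) / 2 / (elapsed_sec + 1) + 1;
	printf("current %d KB/sec\n", currspeed);
	if (currspeed > speed_max)
		printf("over the hard limit: back off and let normal I/O run\n");
	else if (currspeed < speed_min)
		printf("below the guaranteed minimum: sync at full speed\n");
	return 0;
}
#endif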
6894 static int remove_and_add_spares(mddev_t *mddev)
6899 mddev->curr_resync_completed = 0;
6901 list_for_each_entry(rdev, &mddev->disks, same_set)
6902 if (rdev->raid_disk >= 0 &&
6903 !test_bit(Blocked, &rdev->flags) &&
6904 (test_bit(Faulty, &rdev->flags) ||
6905 ! test_bit(In_sync, &rdev->flags)) &&
6906 atomic_read(&rdev->nr_pending)==0) {
6907 if (mddev->pers->hot_remove_disk(
6908 mddev, rdev->raid_disk)==0) {
6909 char nm[20];
6910 sprintf(nm,"rd%d", rdev->raid_disk);
6911 sysfs_remove_link(&mddev->kobj, nm);
6912 rdev->raid_disk = -1;
6916 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6917 list_for_each_entry(rdev, &mddev->disks, same_set) {
6918 if (rdev->raid_disk >= 0 &&
6919 !test_bit(In_sync, &rdev->flags) &&
6920 !test_bit(Blocked, &rdev->flags))
6922 if (rdev->raid_disk < 0
6923 && !test_bit(Faulty, &rdev->flags)) {
6924 rdev->recovery_offset = 0;
6926 hot_add_disk(mddev, rdev) == 0) {
6927 char nm[20];
6928 sprintf(nm, "rd%d", rdev->raid_disk);
6929 if (sysfs_create_link(&mddev->kobj,
6932 "md: cannot register "
6936 md_new_event(mddev);
6937 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6946 * This routine is regularly called by all per-raid-array threads to
6947 * deal with generic issues like resync and super-block update.
6948 * Raid personalities that don't have a thread (linear/raid0) do not
6949 * need this as they never do any recovery or update the superblock.
6951 * It does not do any resync itself, but rather "forks" off other threads
6952 * to do that as needed.
6953 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6954 * "->recovery" and create a thread at ->sync_thread.
6955 * When the thread finishes it sets MD_RECOVERY_DONE
6956 * and wakes up this thread, which will reap the thread and finish up.
6957 * This thread also removes any faulty devices (with nr_pending == 0).
6959 * The overall approach is:
6960 * 1/ if the superblock needs updating, update it.
6961 * 2/ If a recovery thread is running, don't do anything else.
6962 * 3/ If recovery has finished, clean up, possibly marking spares active.
6963 * 4/ If there are any faulty devices, remove them.
6964 * 5/ If array is degraded, try to add spare devices
6965 * 6/ If array has spares or is not in-sync, start a resync thread.
6967 void md_check_recovery(mddev_t *mddev)
6973 bitmap_daemon_work(mddev);
6978 if (signal_pending(current)) {
6979 if (mddev->pers->sync_request && !mddev->external) {
6980 printk(KERN_INFO "md: %s in immediate safe mode\n",
6982 mddev->safemode = 2;
6984 flush_signals(current);
6987 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6990 (mddev->flags && !mddev->external) ||
6991 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6992 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6993 (mddev->external == 0 && mddev->safemode == 1) ||
6994 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6995 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6999 if (mddev_trylock(mddev)) {
7003 /* Only thing we do on a ro array is remove
7006 remove_and_add_spares(mddev);
7007 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7011 if (!mddev->external) {
7013 spin_lock_irq(&mddev->write_lock);
7014 if (mddev->safemode &&
7015 !atomic_read(&mddev->writes_pending) &&
7017 mddev->recovery_cp == MaxSector) {
7020 if (mddev->persistent)
7021 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7023 if (mddev->safemode == 1)
7024 mddev->safemode = 0;
7025 spin_unlock_irq(&mddev->write_lock);
7027 sysfs_notify_dirent(mddev->sysfs_state);
7031 md_update_sb(mddev, 0);
7033 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7034 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7035 /* resync/recovery still happening */
7036 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7039 if (mddev->sync_thread) {
7040 /* resync has finished, collect result */
7041 md_unregister_thread(mddev->sync_thread);
7042 mddev->sync_thread = NULL;
7043 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7044 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7046 /* activate any spares */
7047 if (mddev->pers->spare_active(mddev))
7048 sysfs_notify(&mddev->kobj, NULL,
7051 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7052 mddev->pers->finish_reshape)
7053 mddev->pers->finish_reshape(mddev);
7054 md_update_sb(mddev, 1);
7056 /* if array is no longer degraded, then any saved_raid_disk
7057 * information must be scrapped
7059 if (!mddev->degraded)
7060 list_for_each_entry(rdev, &mddev->disks, same_set)
7061 rdev->saved_raid_disk = -1;
7063 mddev->recovery = 0;
7064 /* flag recovery needed just to double check */
7065 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7066 sysfs_notify_dirent(mddev->sysfs_action);
7067 md_new_event(mddev);
7070 /* Set RUNNING before clearing NEEDED to avoid
7071 * any transients in the value of "sync_action".
7073 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7074 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7075 /* Clear some bits that don't mean anything, but
7076 * might be left set
7077 */
7078 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7079 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7081 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7083 /* no recovery is running.
7084 * remove any failed drives, then
7085 * add spares if possible.
7086 * Spares are also removed and re-added, to allow
7087 * the personality to fail the re-add.
7090 if (mddev->reshape_position != MaxSector) {
7091 if (mddev->pers->check_reshape == NULL ||
7092 mddev->pers->check_reshape(mddev) != 0)
7093 /* Cannot proceed */
7095 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7096 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7097 } else if ((spares = remove_and_add_spares(mddev))) {
7098 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7099 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7100 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7101 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7102 } else if (mddev->recovery_cp < MaxSector) {
7103 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7104 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7105 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
7106 /* nothing to be done ... */
7109 if (mddev->pers->sync_request) {
7110 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
7111 /* We are adding a device or devices to an array
7112 * which has the bitmap stored on all devices.
7113 * So make sure all bitmap pages get written
7115 bitmap_write_all(mddev->bitmap);
7117 mddev->sync_thread = md_register_thread(md_do_sync,
7120 if (!mddev->sync_thread) {
7121 printk(KERN_ERR "%s: could not start resync"
7124 /* leave the spares where they are, it shouldn't hurt */
7125 mddev->recovery = 0;
7127 md_wakeup_thread(mddev->sync_thread);
7128 sysfs_notify_dirent(mddev->sysfs_action);
7129 md_new_event(mddev);
7132 if (!mddev->sync_thread) {
7133 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7134 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7136 if (mddev->sysfs_action)
7137 sysfs_notify_dirent(mddev->sysfs_action);
7139 mddev_unlock(mddev);
7143 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
7145 sysfs_notify_dirent(rdev->sysfs_state);
7146 wait_event_timeout(rdev->blocked_wait,
7147 !test_bit(Blocked, &rdev->flags),
7148 msecs_to_jiffies(5000));
7149 rdev_dec_pending(rdev, mddev);
7151 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
7153 static int md_notify_reboot(struct notifier_block *this,
7154 unsigned long code, void *x)
7156 struct list_head *tmp;
7159 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
7161 printk(KERN_INFO "md: stopping all md devices.\n");
7163 for_each_mddev(mddev, tmp)
7164 if (mddev_trylock(mddev)) {
7165 /* Force a switch to readonly even if the array
7166 * appears to still be in use. Hence
7167 * the '100'.
7168 */
7169 md_set_readonly(mddev, 100);
7170 mddev_unlock(mddev);
7173 * certain more exotic SCSI devices are known to be
7174 * volatile wrt too early system reboots. While the
7175 * right place to handle this issue is the given
7176 * driver, we do want to have a safe RAID driver ...
7183 static struct notifier_block md_notifier = {
7184 .notifier_call = md_notify_reboot,
7186 .priority = INT_MAX, /* before any real devices */
7189 static void md_geninit(void)
7191 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
7193 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
7196 static int __init md_init(void)
7198 if (register_blkdev(MD_MAJOR, "md"))
7200 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
7201 unregister_blkdev(MD_MAJOR, "md");
7204 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
7205 md_probe, NULL, NULL);
7206 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
7207 md_probe, NULL, NULL);
7209 register_reboot_notifier(&md_notifier);
7210 raid_table_header = register_sysctl_table(raid_root_table);
7220 * Searches all registered partitions for autorun RAID arrays
7224 static LIST_HEAD(all_detected_devices);
7225 struct detected_devices_node {
7226 struct list_head list;
7230 void md_autodetect_dev(dev_t dev)
7232 struct detected_devices_node *node_detected_dev;
7234 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
7235 if (node_detected_dev) {
7236 node_detected_dev->dev = dev;
7237 list_add_tail(&node_detected_dev->list, &all_detected_devices);
7239 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
7240 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
7245 static void autostart_arrays(int part)
7248 struct detected_devices_node *node_detected_dev;
7250 int i_scanned, i_passed;
7255 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
7257 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
7259 node_detected_dev = list_entry(all_detected_devices.next,
7260 struct detected_devices_node, list);
7261 list_del(&node_detected_dev->list);
7262 dev = node_detected_dev->dev;
7263 kfree(node_detected_dev);
7264 rdev = md_import_device(dev, 0, 90);
7268 if (test_bit(Faulty, &rdev->flags)) {
7272 set_bit(AutoDetected, &rdev->flags);
7273 list_add(&rdev->same_set, &pending_raid_disks);
7277 printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
7278 i_scanned, i_passed);
7280 autorun_devices(part);
7283 #endif /* !MODULE */
7285 static __exit void md_exit(void)
7288 struct list_head *tmp;
7290 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
7291 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
7293 unregister_blkdev(MD_MAJOR,"md");
7294 unregister_blkdev(mdp_major, "mdp");
7295 unregister_reboot_notifier(&md_notifier);
7296 unregister_sysctl_table(raid_table_header);
7297 remove_proc_entry("mdstat", NULL);
7298 for_each_mddev(mddev, tmp) {
7299 export_array(mddev);
7300 mddev->hold_active = 0;
7304 subsys_initcall(md_init);
7305 module_exit(md_exit)
7307 static int get_ro(char *buffer, struct kernel_param *kp)
7309 return sprintf(buffer, "%d", start_readonly);
7311 static int set_ro(const char *val, struct kernel_param *kp)
7313 char *e;
7314 int num = simple_strtoul(val, &e, 10);
7315 if (*val && (*e == '\0' || *e == '\n')) {
7316 start_readonly = num;
7322 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
7323 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
7325 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
7327 EXPORT_SYMBOL(register_md_personality);
7328 EXPORT_SYMBOL(unregister_md_personality);
7329 EXPORT_SYMBOL(md_error);
7330 EXPORT_SYMBOL(md_done_sync);
7331 EXPORT_SYMBOL(md_write_start);
7332 EXPORT_SYMBOL(md_write_end);
7333 EXPORT_SYMBOL(md_register_thread);
7334 EXPORT_SYMBOL(md_unregister_thread);
7335 EXPORT_SYMBOL(md_wakeup_thread);
7336 EXPORT_SYMBOL(md_check_recovery);
7337 MODULE_LICENSE("GPL");
7338 MODULE_DESCRIPTION("MD RAID framework");
7340 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);