/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * irq balancing.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
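/*
 * For illustration only: the limits below can be tuned at run time
 * from user space (values are KB/sec; md0 is just an example array):
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 */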
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
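/*
 * Illustrative user-space sketch (not kernel code): a reader of
 * /proc/mdstat can block until the event count changes.  Whether the
 * wakeup is reported as POLLPRI is an assumption here; see the
 * /proc/mdstat poll implementation for the authoritative flags.
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));	// consume the current state
 *	poll(&pfd, 1, -1);		// returns when the count bumps
 *	lseek(fd, 0, SEEK_SET);		// re-read for the new state
 */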
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns a reference
 * to the current mddev and must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
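/*
 * Typical use of the iterator above (md_print_devices() later in this
 * file does exactly this); 'tmp' is the raw list cursor, 'mddev' the
 * ref-counted array:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		... a reference to mddev is held across the body ...
 *	}
 *
 * Breaking out early leaves that reference held; the caller must then
 * drop it with mddev_put(mddev).
 */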
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended || mddev->barrier) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended && !mddev->barrier)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	rv = mddev->pers->make_request(q, bio);

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
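/*
 * Sketch of the intended pairing (the level-change code in this file
 * uses it this way): new IO parks in md_make_request() while the
 * personality is reconfigured underneath.
 *
 *	mddev_suspend(mddev);
 *	... swap ->pers / private data ...
 *	mddev_resume(mddev);
 */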
int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
 * Generic barrier handling for md
 */

#define POST_REQUEST_BARRIER ((void*)1)

static void md_end_barrier(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		if (mddev->barrier == POST_REQUEST_BARRIER) {
			/* This was a post-request barrier */
			mddev->barrier = NULL;
			wake_up(&mddev->sb_wait);
		} else
			/* The pre-request barrier has finished */
			schedule_work(&mddev->barrier_work);
	}
	bio_put(bio);
}

static void submit_barriers(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc(GFP_KERNEL, 0);
			bi->bi_end_io = md_end_barrier;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_BARRIER, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}

static void md_submit_barrier(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
	struct bio *bio = mddev->barrier;

	atomic_set(&mddev->flush_pending, 1);

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		bio_endio(bio, -EOPNOTSUPP);
	else if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
		if (mddev->pers->make_request(mddev->queue, bio))
			generic_make_request(bio);
		mddev->barrier = POST_REQUEST_BARRIER;
		submit_barriers(mddev);
	}
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->barrier = NULL;
		wake_up(&mddev->sb_wait);
	}
}

void md_barrier_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->barrier,
			    mddev->write_lock, /*nothing*/);
	mddev->barrier = bio;
	spin_unlock_irq(&mddev->write_lock);

	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->barrier_work, md_submit_barrier);

	submit_barriers(mddev);

	if (atomic_dec_and_test(&mddev->flush_pending))
		schedule_work(&mddev->barrier_work);
}
EXPORT_SYMBOL(md_barrier_request);
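/*
 * Summary of the barrier flow implemented above (descriptive only):
 *
 *  1. md_barrier_request() waits for any in-flight barrier, parks the
 *     bio in mddev->barrier and pre-flushes every active rdev with a
 *     zero-length WRITE_BARRIER bio (submit_barriers()).
 *  2. Once the last pre-flush completes, md_end_barrier() schedules
 *     md_submit_barrier(), which strips BIO_RW_BARRIER from the
 *     original bio, submits its payload, then sets mddev->barrier to
 *     POST_REQUEST_BARRIER and flushes every rdev again.
 *  3. When the post-flushes drain, mddev->barrier is cleared and
 *     waiters on sb_wait (including md_make_request()) proceed.
 */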
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->open_mutex);
	mutex_init(&new->reconfig_mutex);
	mutex_init(&new->bitmap_info.mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	atomic_set(&new->flush_pending, 0);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_min = 0;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
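/*
 * Worked example, assuming MD_NEW_SIZE_SECTORS() from md_p.h rounds
 * down to a 64KB boundary and then steps back one 64KB (128-sector)
 * chunk: a device of 1000170532 sectors gives
 *
 *	(1000170532 & ~127) - 128 = 1000170496 - 128 = 1000170368
 *
 * i.e. the 0.90 superblock occupies the last 64KB-aligned 64KB of
 * the device.
 */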
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
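/*
 * The usual caller pattern (see super_90_rdev_size_change() below):
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(mddev);	// blocks until pending_writes is 0,
 *				// resubmitting any failed-barrier bios
 */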
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
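/*
 * Example: md_csum_fold(0x89abcdef) = 0xcdef + 0x89ab = 0x1579a on the
 * first fold, then 0x579a + 0x1 = 0x579b on the second; the second add
 * folds the carry back in, so the result always fits in 16 bits.
 */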
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
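/*
 * Typical call site, sketched: a personality that has no bitmap
 * support rejects the array from its run() method (raid0 does this):
 *
 *	static int run(mddev_t *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		...
 *	}
 */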
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position. We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			/* device number out of range */
			return -EINVAL;
		}
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
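/*
 * Sketch of the expected usage (exact call sites live in the
 * personalities, e.g. raid1): call this at the end of run() once all
 * members are bound, and again after kicking a disk:
 *
 *	if (md_integrity_register(mddev))
 *		return -EINVAL;	// conflicting integrity profiles
 */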
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	        rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have a event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, an 'even' event must also go
			 * to spares. */
			if ((mddev->events&1)==0)
				nospares = 0;
		} else {
			/* otherwise an 'odd' event must go to spares */
			if ((mddev->events&1))
				nospares = 0;
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept both forms. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str. They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
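/*
 * So cmd_match("remove\n", "remove") and cmd_match("remove", "remove")
 * both return 1, while cmd_match("removed", "remove") returns 0.
 */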
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 *  insync - sets Insync providing device isn't active
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
2303 errors_show(mdk_rdev_t *rdev, char *page)
2305 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2309 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2312 unsigned long n = simple_strtoul(buf, &e, 10);
2313 if (*buf && (*e == 0 || *e == '\n')) {
2314 atomic_set(&rdev->corrected_errors, n);
2319 static struct rdev_sysfs_entry rdev_errors =
2320 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2323 slot_show(mdk_rdev_t *rdev, char *page)
2325 if (rdev->raid_disk < 0)
2326 return sprintf(page, "none\n");
2328 return sprintf(page, "%d\n", rdev->raid_disk);
2332 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2337 int slot = simple_strtoul(buf, &e, 10);
2338 if (strncmp(buf, "none", 4)==0)
2340 else if (e==buf || (*e && *e!= '\n'))
2342 if (rdev->mddev->pers && slot == -1) {
2343 /* Setting 'slot' on an active array requires also
2344 * updating the 'rd%d' link, and communicating
2345 * with the personality with ->hot_*_disk.
2346 * For now we only support removing
2347 * failed/spare devices. This normally happens automatically,
2348 * but not when the metadata is externally managed.
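	 */
	/*
	 * e.g. (hypothetical device) on an active array
	 *   echo none > /sys/block/md0/md/dev-sdb1/slot
	 * detaches a failed or spare device from its slot.
	 */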
2350 if (rdev->raid_disk == -1)
2352 /* personality does all needed checks */
2353 if (rdev->mddev->pers->hot_add_disk == NULL)
2355 err = rdev->mddev->pers->
2356 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2359 sprintf(nm, "rd%d", rdev->raid_disk);
2360 sysfs_remove_link(&rdev->mddev->kobj, nm);
2361 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2362 md_wakeup_thread(rdev->mddev->thread);
2363 } else if (rdev->mddev->pers) {
2365 /* Activating a spare .. or possibly reactivating
2366 * if we ever get bitmaps working here.
2369 if (rdev->raid_disk != -1)
2372 if (rdev->mddev->pers->hot_add_disk == NULL)
2375 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2376 if (rdev2->raid_disk == slot)
2379 rdev->raid_disk = slot;
2380 if (test_bit(In_sync, &rdev->flags))
2381 rdev->saved_raid_disk = slot;
2383 rdev->saved_raid_disk = -1;
2384 err = rdev->mddev->pers->
2385 hot_add_disk(rdev->mddev, rdev);
2387 rdev->raid_disk = -1;
2390 sysfs_notify_dirent(rdev->sysfs_state);
2391 sprintf(nm, "rd%d", rdev->raid_disk);
2392 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2394 "md: cannot register "
2396 nm, mdname(rdev->mddev));
2398 /* don't wakeup anyone, leave that to userspace. */
2400 if (slot >= rdev->mddev->raid_disks)
2402 rdev->raid_disk = slot;
2403 /* assume it is working */
2404 clear_bit(Faulty, &rdev->flags);
2405 clear_bit(WriteMostly, &rdev->flags);
2406 set_bit(In_sync, &rdev->flags);
2407 sysfs_notify_dirent(rdev->sysfs_state);
2413 static struct rdev_sysfs_entry rdev_slot =
2414 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2417 offset_show(mdk_rdev_t *rdev, char *page)
2419 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2423 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2426 unsigned long long offset = simple_strtoull(buf, &e, 10);
2427 if (e==buf || (*e && *e != '\n'))
2429 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2431 if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
2435 rdev->data_offset = offset;
2439 static struct rdev_sysfs_entry rdev_offset =
2440 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2443 rdev_size_show(mdk_rdev_t *rdev, char *page)
2445 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2448 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2450 /* check if two start/length pairs overlap */
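{
	/* (body reconstructed from context) the ranges [s1, s1+l1) and
	 * [s2, s2+l2) overlap iff each starts before the other ends */
	if (s1 + l1 <= s2)
		return 0;
	if (s2 + l2 <= s1)
		return 0;
	return 1;
}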
2458 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2460 unsigned long long blocks;
2463 if (strict_strtoull(buf, 10, &blocks) < 0)
2466 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2467 return -EINVAL; /* sector conversion overflow */
2470 if (new != blocks * 2)
2471 return -EINVAL; /* unsigned long long to sector_t overflow */
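	*sectors = new;
	return 0;
}
/*
 * Sizes written via sysfs are in 1K blocks while sectors are 512 bytes,
 * hence the doubling: e.g. "1024" (1 MiB in 1K blocks) gives 2048 sectors.
 */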
2478 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2480 mddev_t *my_mddev = rdev->mddev;
2481 sector_t oldsectors = rdev->sectors;
	if (strict_blocks_to_sectors(buf, &sectors) < 0)
2486 if (my_mddev->pers && rdev->raid_disk >= 0) {
2487 if (my_mddev->persistent) {
2488 sectors = super_types[my_mddev->major_version].
2489 rdev_size_change(rdev, sectors);
2492 } else if (!sectors)
2493 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2496 if (sectors < my_mddev->dev_sectors)
2497 return -EINVAL; /* component must fit device */
2499 rdev->sectors = sectors;
2500 if (sectors > oldsectors && my_mddev->external) {
2501 /* need to check that all other rdevs with the same ->bdev
2502 * do not overlap. We need to unlock the mddev to avoid
2503 * a deadlock. We have already changed rdev->sectors, and if
2504 * we have to change it back, we will have the lock again.
2508 struct list_head *tmp;
2510 mddev_unlock(my_mddev);
2511 for_each_mddev(mddev, tmp) {
2515 list_for_each_entry(rdev2, &mddev->disks, same_set)
2516 if (test_bit(AllReserved, &rdev2->flags) ||
2517 (rdev->bdev == rdev2->bdev &&
2519 overlaps(rdev->data_offset, rdev->sectors,
2525 mddev_unlock(mddev);
2531 mddev_lock(my_mddev);
2533 /* Someone else could have slipped in a size
2534 * change here, but doing so is just silly.
2535 * We put oldsectors back because we *know* it is
	 * safe, and trust userspace not to race with itself.
	 */
2539 rdev->sectors = oldsectors;
2546 static struct rdev_sysfs_entry rdev_size =
2547 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2549 static struct attribute *rdev_default_attrs[] = {
2558 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2560 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2561 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2562 mddev_t *mddev = rdev->mddev;
2568 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2570 if (rdev->mddev == NULL)
2573 rv = entry->show(rdev, page);
2574 mddev_unlock(mddev);
2580 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2581 const char *page, size_t length)
2583 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2584 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2586 mddev_t *mddev = rdev->mddev;
2590 if (!capable(CAP_SYS_ADMIN))
2592 rv = mddev ? mddev_lock(mddev): -EBUSY;
2594 if (rdev->mddev == NULL)
2597 rv = entry->store(rdev, page, length);
2598 mddev_unlock(mddev);
2603 static void rdev_free(struct kobject *ko)
2605 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2608 static struct sysfs_ops rdev_sysfs_ops = {
2609 .show = rdev_attr_show,
2610 .store = rdev_attr_store,
2612 static struct kobj_type rdev_ktype = {
2613 .release = rdev_free,
2614 .sysfs_ops = &rdev_sysfs_ops,
2615 .default_attrs = rdev_default_attrs,
2619 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2621 * mark the device faulty if:
2623 * - the device is nonexistent (zero size)
2624 * - the device has no valid superblock
2626 * a faulty rdev _never_ has rdev->sb set.
2628 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2630 char b[BDEVNAME_SIZE];
2635 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2637 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2638 return ERR_PTR(-ENOMEM);
2641 if ((err = alloc_disk_sb(rdev)))
2644 err = lock_rdev(rdev, newdev, super_format == -2);
2648 kobject_init(&rdev->kobj, &rdev_ktype);
2651 rdev->saved_raid_disk = -1;
2652 rdev->raid_disk = -1;
2654 rdev->data_offset = 0;
2655 rdev->sb_events = 0;
2656 atomic_set(&rdev->nr_pending, 0);
2657 atomic_set(&rdev->read_errors, 0);
2658 atomic_set(&rdev->corrected_errors, 0);
2660 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2663 "md: %s has zero or unknown size, marking faulty!\n",
2664 bdevname(rdev->bdev,b));
2669 if (super_format >= 0) {
2670 err = super_types[super_format].
2671 load_super(rdev, NULL, super_minor);
2672 if (err == -EINVAL) {
2674 "md: %s does not have a valid v%d.%d "
2675 "superblock, not importing!\n",
2676 bdevname(rdev->bdev,b),
2677 super_format, super_minor);
2682 "md: could not read %s's sb, not importing!\n",
2683 bdevname(rdev->bdev,b));
2688 INIT_LIST_HEAD(&rdev->same_set);
2689 init_waitqueue_head(&rdev->blocked_wait);
2694 if (rdev->sb_page) {
2700 return ERR_PTR(err);
2704 * Check a full RAID array for plausibility
2708 static void analyze_sbs(mddev_t * mddev)
2711 mdk_rdev_t *rdev, *freshest, *tmp;
2712 char b[BDEVNAME_SIZE];
2715 rdev_for_each(rdev, tmp, mddev)
2716 switch (super_types[mddev->major_version].
2717 load_super(rdev, freshest, mddev->minor_version)) {
2725 "md: fatal superblock inconsistency in %s"
2726 " -- removing from array\n",
2727 bdevname(rdev->bdev,b));
2728 kick_rdev_from_array(rdev);
2732 super_types[mddev->major_version].
2733 validate_super(mddev, freshest);
2736 rdev_for_each(rdev, tmp, mddev) {
2737 if (rdev->desc_nr >= mddev->max_disks ||
2738 i > mddev->max_disks) {
2740 "md: %s: %s: only %d devices permitted\n",
2741 mdname(mddev), bdevname(rdev->bdev, b),
2743 kick_rdev_from_array(rdev);
2746 if (rdev != freshest)
2747 if (super_types[mddev->major_version].
2748 validate_super(mddev, rdev)) {
2749 printk(KERN_WARNING "md: kicking non-fresh %s"
2751 bdevname(rdev->bdev,b));
2752 kick_rdev_from_array(rdev);
2755 if (mddev->level == LEVEL_MULTIPATH) {
2756 rdev->desc_nr = i++;
2757 rdev->raid_disk = rdev->desc_nr;
2758 set_bit(In_sync, &rdev->flags);
2759 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2760 rdev->raid_disk = -1;
2761 clear_bit(In_sync, &rdev->flags);
2766 /* Read a fixed-point number.
2767 * Numbers in sysfs attributes should be in "standard" units where
2768 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
2770 * milliseconds or jiffies.
2771 * This function takes a decimal number with a possible fractional
2772 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale',
 * all without any floating-point arithmetic.
 */
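/*
 * e.g. strict_strtoul_scaled("0.350", &res, 3) stores 350 in res
 * (the input scaled by 10^3); safe_delay_store() below reads msec this way.
 */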
2776 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2778 unsigned long result = 0;
2780 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2783 else if (decimals < scale) {
2786 result = result * 10 + value;
2798 while (decimals < scale) {
2807 static void md_safemode_timeout(unsigned long data);
2810 safe_delay_show(mddev_t *mddev, char *page)
2812 int msec = (mddev->safemode_delay*1000)/HZ;
2813 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2816 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2820 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2823 mddev->safemode_delay = 0;
2825 unsigned long old_delay = mddev->safemode_delay;
2826 mddev->safemode_delay = (msec*HZ)/1000;
2827 if (mddev->safemode_delay == 0)
2828 mddev->safemode_delay = 1;
2829 if (mddev->safemode_delay < old_delay)
2830 md_safemode_timeout((unsigned long)mddev);
2834 static struct md_sysfs_entry md_safe_delay =
2835 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
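/*
 * Illustrative usage: echo 0.200 > /sys/block/md0/md/safe_mode_delay
 * requests a 200 msec safemode delay (stored internally in HZ units).
 */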
2838 level_show(mddev_t *mddev, char *page)
2840 struct mdk_personality *p = mddev->pers;
2842 return sprintf(page, "%s\n", p->name);
2843 else if (mddev->clevel[0])
2844 return sprintf(page, "%s\n", mddev->clevel);
2845 else if (mddev->level != LEVEL_NONE)
2846 return sprintf(page, "%d\n", mddev->level);
2852 level_store(mddev_t *mddev, const char *buf, size_t len)
2856 struct mdk_personality *pers;
2860 if (mddev->pers == NULL) {
2863 if (len >= sizeof(mddev->clevel))
2865 strncpy(mddev->clevel, buf, len);
2866 if (mddev->clevel[len-1] == '\n')
2868 mddev->clevel[len] = 0;
2869 mddev->level = LEVEL_NONE;
2873 /* request to change the personality. Need to ensure:
2874 * - array is not engaged in resync/recovery/reshape
2875 * - old personality can be suspended
	 *  - new personality can make sense of the array's data.
2879 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2882 if (!mddev->pers->quiesce) {
2883 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2884 mdname(mddev), mddev->pers->name);
2888 /* Now find the new personality */
2889 if (len == 0 || len >= sizeof(level))
2891 strncpy(level, buf, len);
2892 if (level[len-1] == '\n')
2896 request_module("md-%s", level);
2897 spin_lock(&pers_lock);
2898 pers = find_pers(LEVEL_NONE, level);
2899 if (!pers || !try_module_get(pers->owner)) {
2900 spin_unlock(&pers_lock);
2901 printk(KERN_WARNING "md: personality %s not loaded\n", level);
2904 spin_unlock(&pers_lock);
2906 if (pers == mddev->pers) {
2907 /* Nothing to do! */
2908 module_put(pers->owner);
2911 if (!pers->takeover) {
2912 module_put(pers->owner);
2913 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2914 mdname(mddev), level);
2918 /* ->takeover must set new_* and/or delta_disks
2919 * if it succeeds, and may set them when it fails.
2921 priv = pers->takeover(mddev);
2923 mddev->new_level = mddev->level;
2924 mddev->new_layout = mddev->layout;
2925 mddev->new_chunk_sectors = mddev->chunk_sectors;
2926 mddev->raid_disks -= mddev->delta_disks;
2927 mddev->delta_disks = 0;
2928 module_put(pers->owner);
2929 printk(KERN_WARNING "md: %s: %s would not accept array\n",
2930 mdname(mddev), level);
2931 return PTR_ERR(priv);
2934 /* Looks like we have a winner */
2935 mddev_suspend(mddev);
2936 mddev->pers->stop(mddev);
2937 module_put(mddev->pers->owner);
2938 /* Invalidate devices that are now superfluous */
2939 list_for_each_entry(rdev, &mddev->disks, same_set)
2940 if (rdev->raid_disk >= mddev->raid_disks) {
2941 rdev->raid_disk = -1;
2942 clear_bit(In_sync, &rdev->flags);
2945 mddev->private = priv;
2946 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2947 mddev->level = mddev->new_level;
2948 mddev->layout = mddev->new_layout;
2949 mddev->chunk_sectors = mddev->new_chunk_sectors;
2950 mddev->delta_disks = 0;
2952 mddev_resume(mddev);
2953 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2954 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2955 md_wakeup_thread(mddev->thread);
2959 static struct md_sysfs_entry md_level =
2960 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
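/*
 * Illustrative online takeover, possible only when the array is not
 * resyncing/reshaping and the target personality implements ->takeover
 * (with the old one providing ->quiesce):
 *
 *   echo raid5 > /sys/block/md0/md/level
 */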
2964 layout_show(mddev_t *mddev, char *page)
2966 /* just a number, not meaningful for all levels */
2967 if (mddev->reshape_position != MaxSector &&
2968 mddev->layout != mddev->new_layout)
2969 return sprintf(page, "%d (%d)\n",
2970 mddev->new_layout, mddev->layout);
2971 return sprintf(page, "%d\n", mddev->layout);
2975 layout_store(mddev_t *mddev, const char *buf, size_t len)
2978 unsigned long n = simple_strtoul(buf, &e, 10);
2980 if (!*buf || (*e && *e != '\n'))
2985 if (mddev->pers->check_reshape == NULL)
2987 mddev->new_layout = n;
2988 err = mddev->pers->check_reshape(mddev);
2990 mddev->new_layout = mddev->layout;
2994 mddev->new_layout = n;
2995 if (mddev->reshape_position == MaxSector)
3000 static struct md_sysfs_entry md_layout =
3001 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3005 raid_disks_show(mddev_t *mddev, char *page)
3007 if (mddev->raid_disks == 0)
3009 if (mddev->reshape_position != MaxSector &&
3010 mddev->delta_disks != 0)
3011 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3012 mddev->raid_disks - mddev->delta_disks);
3013 return sprintf(page, "%d\n", mddev->raid_disks);
3016 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3019 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3023 unsigned long n = simple_strtoul(buf, &e, 10);
3025 if (!*buf || (*e && *e != '\n'))
3029 rv = update_raid_disks(mddev, n);
3030 else if (mddev->reshape_position != MaxSector) {
3031 int olddisks = mddev->raid_disks - mddev->delta_disks;
3032 mddev->delta_disks = n - olddisks;
3033 mddev->raid_disks = n;
3035 mddev->raid_disks = n;
3036 return rv ? rv : len;
3038 static struct md_sysfs_entry md_raid_disks =
3039 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3042 chunk_size_show(mddev_t *mddev, char *page)
3044 if (mddev->reshape_position != MaxSector &&
3045 mddev->chunk_sectors != mddev->new_chunk_sectors)
3046 return sprintf(page, "%d (%d)\n",
3047 mddev->new_chunk_sectors << 9,
3048 mddev->chunk_sectors << 9);
3049 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3053 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3056 unsigned long n = simple_strtoul(buf, &e, 10);
3058 if (!*buf || (*e && *e != '\n'))
3063 if (mddev->pers->check_reshape == NULL)
3065 mddev->new_chunk_sectors = n >> 9;
3066 err = mddev->pers->check_reshape(mddev);
3068 mddev->new_chunk_sectors = mddev->chunk_sectors;
3072 mddev->new_chunk_sectors = n >> 9;
3073 if (mddev->reshape_position == MaxSector)
3074 mddev->chunk_sectors = n >> 9;
3078 static struct md_sysfs_entry md_chunk_size =
3079 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3082 resync_start_show(mddev_t *mddev, char *page)
3084 if (mddev->recovery_cp == MaxSector)
3085 return sprintf(page, "none\n");
3086 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3090 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3093 unsigned long long n = simple_strtoull(buf, &e, 10);
3097 if (!*buf || (*e && *e != '\n'))
3100 mddev->recovery_cp = n;
3103 static struct md_sysfs_entry md_resync_start =
3104 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
3142 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3143 write_pending, active_idle, bad_word};
3144 static char *array_states[] = {
3145 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3146 "write-pending", "active-idle", NULL };
3148 static int match_word(const char *word, char **list)
3151 for (n=0; list[n]; n++)
3152 if (cmd_match(word, list[n]))
3158 array_state_show(mddev_t *mddev, char *page)
3160 enum array_state st = inactive;
3173 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3175 else if (mddev->safemode)
3181 if (list_empty(&mddev->disks) &&
3182 mddev->raid_disks == 0 &&
3183 mddev->dev_sectors == 0)
3188 return sprintf(page, "%s\n", array_states[st]);
3191 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3192 static int do_md_run(mddev_t * mddev);
3193 static int restart_array(mddev_t *mddev);
3196 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3199 enum array_state st = match_word(buf, array_states);
3204 /* stopping an active array */
3205 if (atomic_read(&mddev->openers) > 0)
3207 err = do_md_stop(mddev, 0, 0);
3210 /* stopping an active array */
3212 if (atomic_read(&mddev->openers) > 0)
3214 err = do_md_stop(mddev, 2, 0);
3216 err = 0; /* already inactive */
3219 break; /* not supported yet */
3222 err = do_md_stop(mddev, 1, 0);
3225 set_disk_ro(mddev->gendisk, 1);
3226 err = do_md_run(mddev);
3232 err = do_md_stop(mddev, 1, 0);
3233 else if (mddev->ro == 1)
3234 err = restart_array(mddev);
3237 set_disk_ro(mddev->gendisk, 0);
3241 err = do_md_run(mddev);
3246 restart_array(mddev);
3247 spin_lock_irq(&mddev->write_lock);
3248 if (atomic_read(&mddev->writes_pending) == 0) {
3249 if (mddev->in_sync == 0) {
3251 if (mddev->safemode == 1)
3252 mddev->safemode = 0;
3253 if (mddev->persistent)
3254 set_bit(MD_CHANGE_CLEAN,
3260 spin_unlock_irq(&mddev->write_lock);
3266 restart_array(mddev);
3267 if (mddev->external)
3268 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3269 wake_up(&mddev->sb_wait);
3273 set_disk_ro(mddev->gendisk, 0);
3274 err = do_md_run(mddev);
3279 /* these cannot be set */
3285 sysfs_notify_dirent(mddev->sysfs_state);
3289 static struct md_sysfs_entry md_array_state =
3290 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
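/*
 * Illustrative usage: cat /sys/block/md0/md/array_state reports one of
 * the strings above, and echo readonly > /sys/block/md0/md/array_state
 * requests the corresponding transition.
 */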
3293 null_show(mddev_t *mddev, char *page)
3299 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
	/* buf must be "%d:%d" (with an optional trailing newline),
	 * giving major and minor numbers */
3302 /* The new device is added to the array.
3303 * If the array has a persistent superblock, we read the
3304 * superblock to initialise info and check validity.
3305 * Otherwise, only checking done is that in bind_rdev_to_array,
3306 * which mainly checks size.
3309 int major = simple_strtoul(buf, &e, 10);
3315 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3317 minor = simple_strtoul(e+1, &e, 10);
3318 if (*e && *e != '\n')
3320 dev = MKDEV(major, minor);
3321 if (major != MAJOR(dev) ||
3322 minor != MINOR(dev))
3326 if (mddev->persistent) {
3327 rdev = md_import_device(dev, mddev->major_version,
3328 mddev->minor_version);
3329 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3330 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3331 mdk_rdev_t, same_set);
3332 err = super_types[mddev->major_version]
3333 .load_super(rdev, rdev0, mddev->minor_version);
3337 } else if (mddev->external)
3338 rdev = md_import_device(dev, -2, -1);
3340 rdev = md_import_device(dev, -1, -1);
3343 return PTR_ERR(rdev);
3344 err = bind_rdev_to_array(rdev, mddev);
3348 return err ? err : len;
3351 static struct md_sysfs_entry md_new_device =
3352 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
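/*
 * Illustrative usage: echo 8:16 > /sys/block/md0/md/new_dev binds the
 * device with major:minor 8:16 (conventionally /dev/sdb) to the array.
 */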
3355 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3358 unsigned long chunk, end_chunk;
3362 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3364 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3365 if (buf == end) break;
3366 if (*end == '-') { /* range */
3368 end_chunk = simple_strtoul(buf, &end, 0);
3369 if (buf == end) break;
3371 if (*end && !isspace(*end)) break;
3372 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3374 while (isspace(*buf)) buf++;
3376 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3381 static struct md_sysfs_entry md_bitmap =
3382 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
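/*
 * Illustrative usage: echo "0-15 64" > /sys/block/md0/md/bitmap_set_bits
 * marks bitmap chunks 0-15 and chunk 64 dirty, so those regions will be
 * treated as needing resync.
 */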
3385 size_show(mddev_t *mddev, char *page)
3387 return sprintf(page, "%llu\n",
3388 (unsigned long long)mddev->dev_sectors / 2);
3391 static int update_size(mddev_t *mddev, sector_t num_sectors);
3394 size_store(mddev_t *mddev, const char *buf, size_t len)
3396 /* If array is inactive, we can reduce the component size, but
3397 * not increase it (except from 0).
3398 * If array is active, we can try an on-line resize
	int err = strict_blocks_to_sectors(buf, &sectors);
3406 err = update_size(mddev, sectors);
3407 md_update_sb(mddev, 1);
3409 if (mddev->dev_sectors == 0 ||
3410 mddev->dev_sectors > sectors)
3411 mddev->dev_sectors = sectors;
3415 return err ? err : len;
3418 static struct md_sysfs_entry md_size =
3419 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3424 * 'none' for arrays with no metadata (good luck...)
3425 * 'external' for arrays with externally managed metadata,
3426 * or N.M for internally known formats
3429 metadata_show(mddev_t *mddev, char *page)
3431 if (mddev->persistent)
3432 return sprintf(page, "%d.%d\n",
3433 mddev->major_version, mddev->minor_version);
3434 else if (mddev->external)
3435 return sprintf(page, "external:%s\n", mddev->metadata_type);
3437 return sprintf(page, "none\n");
3441 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3445 /* Changing the details of 'external' metadata is
3446 * always permitted. Otherwise there must be
3447 * no devices attached to the array.
3449 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3451 else if (!list_empty(&mddev->disks))
3454 if (cmd_match(buf, "none")) {
3455 mddev->persistent = 0;
3456 mddev->external = 0;
3457 mddev->major_version = 0;
3458 mddev->minor_version = 90;
3461 if (strncmp(buf, "external:", 9) == 0) {
3462 size_t namelen = len-9;
3463 if (namelen >= sizeof(mddev->metadata_type))
3464 namelen = sizeof(mddev->metadata_type)-1;
3465 strncpy(mddev->metadata_type, buf+9, namelen);
3466 mddev->metadata_type[namelen] = 0;
3467 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3468 mddev->metadata_type[--namelen] = 0;
3469 mddev->persistent = 0;
3470 mddev->external = 1;
3471 mddev->major_version = 0;
3472 mddev->minor_version = 90;
3475 major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
3479 minor = simple_strtoul(buf, &e, 10);
3480 if (e==buf || (*e && *e != '\n') )
3482 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3484 mddev->major_version = major;
3485 mddev->minor_version = minor;
3486 mddev->persistent = 1;
3487 mddev->external = 0;
3491 static struct md_sysfs_entry md_metadata =
3492 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
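/*
 * Accepted forms, per metadata_store() above: "none", "external:<name>"
 * (e.g. echo external:imsm > metadata_version), or a known version such
 * as "0.90" or "1.2".
 */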
3495 action_show(mddev_t *mddev, char *page)
3497 char *type = "idle";
3498 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3500 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3501 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3502 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3504 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3505 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3507 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3511 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3514 return sprintf(page, "%s\n", type);
3518 action_store(mddev_t *mddev, const char *page, size_t len)
3520 if (!mddev->pers || !mddev->pers->sync_request)
3523 if (cmd_match(page, "frozen"))
3524 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3526 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3528 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3529 if (mddev->sync_thread) {
3530 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3531 md_unregister_thread(mddev->sync_thread);
3532 mddev->sync_thread = NULL;
3533 mddev->recovery = 0;
3535 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3536 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3538 else if (cmd_match(page, "resync"))
3539 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3540 else if (cmd_match(page, "recover")) {
3541 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3542 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3543 } else if (cmd_match(page, "reshape")) {
3545 if (mddev->pers->start_reshape == NULL)
3547 err = mddev->pers->start_reshape(mddev);
3550 sysfs_notify(&mddev->kobj, NULL, "degraded");
3552 if (cmd_match(page, "check"))
3553 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3554 else if (!cmd_match(page, "repair"))
3556 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3557 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3559 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3560 md_wakeup_thread(mddev->thread);
3561 sysfs_notify_dirent(mddev->sysfs_action);
3566 mismatch_cnt_show(mddev_t *mddev, char *page)
3568 return sprintf(page, "%llu\n",
3569 (unsigned long long) mddev->resync_mismatches);
3572 static struct md_sysfs_entry md_scan_mode =
3573 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
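/*
 * Illustrative usage: echo check > /sys/block/md0/md/sync_action starts a
 * read-only consistency scan, "repair" also corrects mismatches, and
 * "idle" interrupts whatever scan is running.
 */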
3576 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3579 sync_min_show(mddev_t *mddev, char *page)
3581 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3582 mddev->sync_speed_min ? "local": "system");
3586 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3590 if (strncmp(buf, "system", 6)==0) {
3591 mddev->sync_speed_min = 0;
3594 min = simple_strtoul(buf, &e, 10);
3595 if (buf == e || (*e && *e != '\n') || min <= 0)
3597 mddev->sync_speed_min = min;
3601 static struct md_sysfs_entry md_sync_min =
3602 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3605 sync_max_show(mddev_t *mddev, char *page)
3607 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3608 mddev->sync_speed_max ? "local": "system");
3612 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3616 if (strncmp(buf, "system", 6)==0) {
3617 mddev->sync_speed_max = 0;
3620 max = simple_strtoul(buf, &e, 10);
3621 if (buf == e || (*e && *e != '\n') || max <= 0)
3623 mddev->sync_speed_max = max;
3627 static struct md_sysfs_entry md_sync_max =
3628 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3631 degraded_show(mddev_t *mddev, char *page)
3633 return sprintf(page, "%d\n", mddev->degraded);
3635 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3638 sync_force_parallel_show(mddev_t *mddev, char *page)
3640 return sprintf(page, "%d\n", mddev->parallel_resync);
3644 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3648 if (strict_strtol(buf, 10, &n))
3651 if (n != 0 && n != 1)
3654 mddev->parallel_resync = n;
3656 if (mddev->sync_thread)
3657 wake_up(&resync_wait);
3662 /* force parallel resync, even with shared block devices */
3663 static struct md_sysfs_entry md_sync_force_parallel =
3664 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3665 sync_force_parallel_show, sync_force_parallel_store);
3668 sync_speed_show(mddev_t *mddev, char *page)
3670 unsigned long resync, dt, db;
3671 if (mddev->curr_resync == 0)
3672 return sprintf(page, "none\n");
3673 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3674 dt = (jiffies - mddev->resync_mark) / HZ;
3676 db = resync - mddev->resync_mark_cnt;
3677 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
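}
/*
 * i.e. sectors transferred since the last mark divided by elapsed seconds,
 * halved to convert 512-byte sectors into KiB/sec, the same units as
 * speed_limit_{min,max}.
 */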
3680 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3683 sync_completed_show(mddev_t *mddev, char *page)
3685 unsigned long max_sectors, resync;
3687 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3688 return sprintf(page, "none\n");
3690 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3691 max_sectors = mddev->resync_max_sectors;
3693 max_sectors = mddev->dev_sectors;
3695 resync = mddev->curr_resync_completed;
3696 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3699 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3702 min_sync_show(mddev_t *mddev, char *page)
3704 return sprintf(page, "%llu\n",
3705 (unsigned long long)mddev->resync_min);
3708 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3710 unsigned long long min;
3711 if (strict_strtoull(buf, 10, &min))
3713 if (min > mddev->resync_max)
3715 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3718 /* Must be a multiple of chunk_size */
3719 if (mddev->chunk_sectors) {
3720 sector_t temp = min;
3721 if (sector_div(temp, mddev->chunk_sectors))
3724 mddev->resync_min = min;
3729 static struct md_sysfs_entry md_min_sync =
3730 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3733 max_sync_show(mddev_t *mddev, char *page)
3735 if (mddev->resync_max == MaxSector)
3736 return sprintf(page, "max\n");
3738 return sprintf(page, "%llu\n",
3739 (unsigned long long)mddev->resync_max);
3742 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3744 if (strncmp(buf, "max", 3) == 0)
3745 mddev->resync_max = MaxSector;
3747 unsigned long long max;
3748 if (strict_strtoull(buf, 10, &max))
3750 if (max < mddev->resync_min)
3752 if (max < mddev->resync_max &&
3754 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3757 /* Must be a multiple of chunk_size */
3758 if (mddev->chunk_sectors) {
3759 sector_t temp = max;
3760 if (sector_div(temp, mddev->chunk_sectors))
3763 mddev->resync_max = max;
3765 wake_up(&mddev->recovery_wait);
3769 static struct md_sysfs_entry md_max_sync =
3770 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3773 suspend_lo_show(mddev_t *mddev, char *page)
3775 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3779 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3782 unsigned long long new = simple_strtoull(buf, &e, 10);
3784 if (mddev->pers == NULL ||
3785 mddev->pers->quiesce == NULL)
3787 if (buf == e || (*e && *e != '\n'))
3789 if (new >= mddev->suspend_hi ||
3790 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3791 mddev->suspend_lo = new;
3792 mddev->pers->quiesce(mddev, 2);
3797 static struct md_sysfs_entry md_suspend_lo =
3798 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3802 suspend_hi_show(mddev_t *mddev, char *page)
3804 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3808 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3811 unsigned long long new = simple_strtoull(buf, &e, 10);
3813 if (mddev->pers == NULL ||
3814 mddev->pers->quiesce == NULL)
3816 if (buf == e || (*e && *e != '\n'))
3818 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3819 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3820 mddev->suspend_hi = new;
3821 mddev->pers->quiesce(mddev, 1);
3822 mddev->pers->quiesce(mddev, 0);
3827 static struct md_sysfs_entry md_suspend_hi =
3828 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3831 reshape_position_show(mddev_t *mddev, char *page)
3833 if (mddev->reshape_position != MaxSector)
3834 return sprintf(page, "%llu\n",
3835 (unsigned long long)mddev->reshape_position);
3836 strcpy(page, "none\n");
3841 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3844 unsigned long long new = simple_strtoull(buf, &e, 10);
3847 if (buf == e || (*e && *e != '\n'))
3849 mddev->reshape_position = new;
3850 mddev->delta_disks = 0;
3851 mddev->new_level = mddev->level;
3852 mddev->new_layout = mddev->layout;
3853 mddev->new_chunk_sectors = mddev->chunk_sectors;
3857 static struct md_sysfs_entry md_reshape_position =
3858 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3859 reshape_position_store);
3862 array_size_show(mddev_t *mddev, char *page)
3864 if (mddev->external_size)
3865 return sprintf(page, "%llu\n",
3866 (unsigned long long)mddev->array_sectors/2);
3868 return sprintf(page, "default\n");
3872 array_size_store(mddev_t *mddev, const char *buf, size_t len)
3876 if (strncmp(buf, "default", 7) == 0) {
3878 sectors = mddev->pers->size(mddev, 0, 0);
3880 sectors = mddev->array_sectors;
3882 mddev->external_size = 0;
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
3886 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3889 mddev->external_size = 1;
3892 mddev->array_sectors = sectors;
3893 set_capacity(mddev->gendisk, mddev->array_sectors);
3895 revalidate_disk(mddev->gendisk);
3900 static struct md_sysfs_entry md_array_size =
3901 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
3904 static struct attribute *md_default_attrs[] = {
3907 &md_raid_disks.attr,
3908 &md_chunk_size.attr,
3910 &md_resync_start.attr,
3912 &md_new_device.attr,
3913 &md_safe_delay.attr,
3914 &md_array_state.attr,
3915 &md_reshape_position.attr,
3916 &md_array_size.attr,
3920 static struct attribute *md_redundancy_attrs[] = {
3922 &md_mismatches.attr,
3925 &md_sync_speed.attr,
3926 &md_sync_force_parallel.attr,
3927 &md_sync_completed.attr,
3930 &md_suspend_lo.attr,
3931 &md_suspend_hi.attr,
3936 static struct attribute_group md_redundancy_group = {
3938 .attrs = md_redundancy_attrs,
3943 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3945 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3946 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3951 rv = mddev_lock(mddev);
3953 rv = entry->show(mddev, page);
3954 mddev_unlock(mddev);
3960 md_attr_store(struct kobject *kobj, struct attribute *attr,
3961 const char *page, size_t length)
3963 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3964 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3969 if (!capable(CAP_SYS_ADMIN))
3971 rv = mddev_lock(mddev);
3972 if (mddev->hold_active == UNTIL_IOCTL)
3973 mddev->hold_active = 0;
3975 rv = entry->store(mddev, page, length);
3976 mddev_unlock(mddev);
3981 static void md_free(struct kobject *ko)
3983 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3985 if (mddev->sysfs_state)
3986 sysfs_put(mddev->sysfs_state);
3988 if (mddev->gendisk) {
3989 del_gendisk(mddev->gendisk);
3990 put_disk(mddev->gendisk);
3993 blk_cleanup_queue(mddev->queue);
3998 static struct sysfs_ops md_sysfs_ops = {
3999 .show = md_attr_show,
4000 .store = md_attr_store,
4002 static struct kobj_type md_ktype = {
4004 .sysfs_ops = &md_sysfs_ops,
4005 .default_attrs = md_default_attrs,
4010 static void mddev_delayed_delete(struct work_struct *ws)
4012 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4014 if (mddev->private == &md_redundancy_group) {
4015 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4016 if (mddev->sysfs_action)
4017 sysfs_put(mddev->sysfs_action);
4018 mddev->sysfs_action = NULL;
4019 mddev->private = NULL;
4021 kobject_del(&mddev->kobj);
4022 kobject_put(&mddev->kobj);
4025 static int md_alloc(dev_t dev, char *name)
4027 static DEFINE_MUTEX(disks_mutex);
4028 mddev_t *mddev = mddev_find(dev);
4029 struct gendisk *disk;
4038 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4039 shift = partitioned ? MdpMinorShift : 0;
4040 unit = MINOR(mddev->unit) >> shift;
	/* wait for any previous instance of this device
	 * to be completely removed (mddev_delayed_delete).
	 */
4045 flush_scheduled_work();
4047 mutex_lock(&disks_mutex);
4053 /* Need to ensure that 'name' is not a duplicate.
4056 spin_lock(&all_mddevs_lock);
4058 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4059 if (mddev2->gendisk &&
4060 strcmp(mddev2->gendisk->disk_name, name) == 0) {
4061 spin_unlock(&all_mddevs_lock);
4064 spin_unlock(&all_mddevs_lock);
4068 mddev->queue = blk_alloc_queue(GFP_KERNEL);
4071 mddev->queue->queuedata = mddev;
4073 /* Can be unlocked because the queue is new: no concurrency */
4074 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
4076 blk_queue_make_request(mddev->queue, md_make_request);
4078 disk = alloc_disk(1 << shift);
4080 blk_cleanup_queue(mddev->queue);
4081 mddev->queue = NULL;
4084 disk->major = MAJOR(mddev->unit);
4085 disk->first_minor = unit << shift;
4087 strcpy(disk->disk_name, name);
4088 else if (partitioned)
4089 sprintf(disk->disk_name, "md_d%d", unit);
4091 sprintf(disk->disk_name, "md%d", unit);
4092 disk->fops = &md_fops;
4093 disk->private_data = mddev;
4094 disk->queue = mddev->queue;
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
4099 disk->flags |= GENHD_FL_EXT_DEVT;
4101 mddev->gendisk = disk;
4102 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4103 &disk_to_dev(disk)->kobj, "%s", "md");
4105 /* This isn't possible, but as kobject_init_and_add is marked
4106 * __must_check, we must do something with the result
4108 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4113 mutex_unlock(&disks_mutex);
4115 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4116 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
4122 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4124 md_alloc(dev, NULL);
4128 static int add_named_array(const char *val, struct kernel_param *kp)
4130 /* val must be "md_*" where * is not all digits.
4131 * We allocate an array with a large free minor number, and
4132 * set the name to val. val must not already be an active name.
4134 int len = strlen(val);
4135 char buf[DISK_NAME_LEN];
4137 while (len && val[len-1] == '\n')
4139 if (len >= DISK_NAME_LEN)
4141 strlcpy(buf, val, len+1);
4142 if (strncmp(buf, "md_", 3) != 0)
4144 return md_alloc(0, buf);
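/*
 * add_named_array is assumed to be wired up as the 'new_array' module
 * parameter, in which case (hypothetically):
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 * creates an array that will appear as /dev/md_home.
 */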
4147 static void md_safemode_timeout(unsigned long data)
4149 mddev_t *mddev = (mddev_t *) data;
4151 if (!atomic_read(&mddev->writes_pending)) {
4152 mddev->safemode = 1;
4153 if (mddev->external)
4154 sysfs_notify_dirent(mddev->sysfs_state);
4156 md_wakeup_thread(mddev->thread);
4159 static int start_dirty_degraded;
4161 static int do_md_run(mddev_t * mddev)
4165 struct gendisk *disk;
4166 struct mdk_personality *pers;
4168 if (list_empty(&mddev->disks))
4169 /* cannot run an array with no devices.. */
4176 * Analyze all RAID superblock(s)
4178 if (!mddev->raid_disks) {
4179 if (!mddev->persistent)
4184 if (mddev->level != LEVEL_NONE)
4185 request_module("md-level-%d", mddev->level);
4186 else if (mddev->clevel[0])
4187 request_module("md-%s", mddev->clevel);
4190 * Drop all container device buffers, from now on
4191 * the only valid external interface is through the md
4194 list_for_each_entry(rdev, &mddev->disks, same_set) {
4195 if (test_bit(Faulty, &rdev->flags))
4197 sync_blockdev(rdev->bdev);
4198 invalidate_bdev(rdev->bdev);
		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata;
		 * internal bitmap issues have been handled elsewhere.
		 */
4204 if (rdev->data_offset < rdev->sb_start) {
4205 if (mddev->dev_sectors &&
4206 rdev->data_offset + mddev->dev_sectors
4208 printk("md: %s: data overlaps metadata\n",
4213 if (rdev->sb_start + rdev->sb_size/512
4214 > rdev->data_offset) {
4215 printk("md: %s: metadata overlaps data\n",
4220 sysfs_notify_dirent(rdev->sysfs_state);
4223 md_probe(mddev->unit, NULL, NULL);
4224 disk = mddev->gendisk;
4228 spin_lock(&pers_lock);
4229 pers = find_pers(mddev->level, mddev->clevel);
4230 if (!pers || !try_module_get(pers->owner)) {
4231 spin_unlock(&pers_lock);
4232 if (mddev->level != LEVEL_NONE)
4233 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4236 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4241 spin_unlock(&pers_lock);
4242 if (mddev->level != pers->level) {
4243 mddev->level = pers->level;
4244 mddev->new_level = pers->level;
4246 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4248 if (mddev->reshape_position != MaxSector &&
4249 pers->start_reshape == NULL) {
4250 /* This personality cannot handle reshaping... */
4252 module_put(pers->owner);
4256 if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
4260 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4264 list_for_each_entry(rdev, &mddev->disks, same_set)
4265 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4267 rdev->bdev->bd_contains ==
4268 rdev2->bdev->bd_contains) {
4270 "%s: WARNING: %s appears to be"
4271 " on the same physical disk as"
4274 bdevname(rdev->bdev,b),
4275 bdevname(rdev2->bdev,b2));
4282 "True protection against single-disk"
4283 " failure might be compromised.\n");
4286 mddev->recovery = 0;
4287 /* may be over-ridden by personality */
4288 mddev->resync_max_sectors = mddev->dev_sectors;
4290 mddev->barriers_work = 1;
4291 mddev->ok_start_degraded = start_dirty_degraded;
4294 mddev->ro = 2; /* read-only, but switch on first write */
4296 err = mddev->pers->run(mddev);
4298 printk(KERN_ERR "md: pers->run() failed ...\n");
4299 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4300 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4301 " but 'external_size' not in effect?\n", __func__);
4303 "md: invalid array_size %llu > default size %llu\n",
4304 (unsigned long long)mddev->array_sectors / 2,
4305 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4307 mddev->pers->stop(mddev);
4309 if (err == 0 && mddev->pers->sync_request) {
4310 err = bitmap_create(mddev);
4312 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4313 mdname(mddev), err);
4314 mddev->pers->stop(mddev);
4318 module_put(mddev->pers->owner);
4320 bitmap_destroy(mddev);
4323 if (mddev->pers->sync_request) {
4324 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4326 "md: cannot register extra attributes for %s\n",
4328 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4329 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4332 atomic_set(&mddev->writes_pending,0);
4333 mddev->safemode = 0;
4334 mddev->safemode_timer.function = md_safemode_timeout;
4335 mddev->safemode_timer.data = (unsigned long) mddev;
4336 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4339 list_for_each_entry(rdev, &mddev->disks, same_set)
4340 if (rdev->raid_disk >= 0) {
4342 sprintf(nm, "rd%d", rdev->raid_disk);
4343 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4344 printk("md: cannot register %s for %s\n",
4348 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4351 md_update_sb(mddev, 0);
4353 set_capacity(disk, mddev->array_sectors);
4355 /* If there is a partially-recovered drive we need to
4356 * start recovery here. If we leave it to md_check_recovery,
4357 * it will remove the drives and not do the right thing
4359 if (mddev->degraded && !mddev->sync_thread) {
4361 list_for_each_entry(rdev, &mddev->disks, same_set)
4362 if (rdev->raid_disk >= 0 &&
4363 !test_bit(In_sync, &rdev->flags) &&
4364 !test_bit(Faulty, &rdev->flags))
4365 /* complete an interrupted recovery */
4367 if (spares && mddev->pers->sync_request) {
4368 mddev->recovery = 0;
4369 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4370 mddev->sync_thread = md_register_thread(md_do_sync,
4373 if (!mddev->sync_thread) {
4374 printk(KERN_ERR "%s: could not start resync"
4377 /* leave the spares where they are, it shouldn't hurt */
4378 mddev->recovery = 0;
4382 md_wakeup_thread(mddev->thread);
4383 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4385 revalidate_disk(mddev->gendisk);
4387 md_new_event(mddev);
4388 sysfs_notify_dirent(mddev->sysfs_state);
4389 if (mddev->sysfs_action)
4390 sysfs_notify_dirent(mddev->sysfs_action);
4391 sysfs_notify(&mddev->kobj, NULL, "degraded");
4392 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4396 static int restart_array(mddev_t *mddev)
4398 struct gendisk *disk = mddev->gendisk;
4400 /* Complain if it has no devices */
4401 if (list_empty(&mddev->disks))
4407 mddev->safemode = 0;
4409 set_disk_ro(disk, 0);
4410 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4412 /* Kick recovery or resync if necessary */
4413 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4414 md_wakeup_thread(mddev->thread);
4415 md_wakeup_thread(mddev->sync_thread);
4416 sysfs_notify_dirent(mddev->sysfs_state);
4420 /* similar to deny_write_access, but accounts for our holding a reference
4421 * to the file ourselves */
4422 static int deny_bitmap_write_access(struct file * file)
4424 struct inode *inode = file->f_mapping->host;
4426 spin_lock(&inode->i_lock);
4427 if (atomic_read(&inode->i_writecount) > 1) {
4428 spin_unlock(&inode->i_lock);
4431 atomic_set(&inode->i_writecount, -1);
4432 spin_unlock(&inode->i_lock);
4437 static void restore_bitmap_write_access(struct file *file)
4439 struct inode *inode = file->f_mapping->host;
4441 spin_lock(&inode->i_lock);
4442 atomic_set(&inode->i_writecount, 1);
4443 spin_unlock(&inode->i_lock);
4447 * 0 - completely stop and dis-assemble array
4448 * 1 - switch to readonly
4449 * 2 - stop but do not disassemble array
4451 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4454 struct gendisk *disk = mddev->gendisk;
4457 mutex_lock(&mddev->open_mutex);
4458 if (atomic_read(&mddev->openers) > is_open) {
4459 printk("md: %s still in use.\n",mdname(mddev));
4461 } else if (mddev->pers) {
4463 if (mddev->sync_thread) {
4464 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4465 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4466 md_unregister_thread(mddev->sync_thread);
4467 mddev->sync_thread = NULL;
4470 del_timer_sync(&mddev->safemode_timer);
4473 case 1: /* readonly */
4479 case 0: /* disassemble */
4481 bitmap_flush(mddev);
4482 md_super_wait(mddev);
4484 set_disk_ro(disk, 0);
4486 mddev->pers->stop(mddev);
4487 mddev->queue->merge_bvec_fn = NULL;
4488 mddev->queue->unplug_fn = NULL;
4489 mddev->queue->backing_dev_info.congested_fn = NULL;
4490 module_put(mddev->pers->owner);
4491 if (mddev->pers->sync_request)
4492 mddev->private = &md_redundancy_group;
4494 /* tell userspace to handle 'inactive' */
4495 sysfs_notify_dirent(mddev->sysfs_state);
4497 list_for_each_entry(rdev, &mddev->disks, same_set)
4498 if (rdev->raid_disk >= 0) {
4500 sprintf(nm, "rd%d", rdev->raid_disk);
4501 sysfs_remove_link(&mddev->kobj, nm);
4504 set_capacity(disk, 0);
4510 if (!mddev->in_sync || mddev->flags) {
4511 /* mark array as shutdown cleanly */
4513 md_update_sb(mddev, 1);
4516 set_disk_ro(disk, 1);
4517 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4521 mutex_unlock(&mddev->open_mutex);
4525 * Free resources if final stop
4529 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4531 bitmap_destroy(mddev);
4532 if (mddev->bitmap_info.file) {
4533 restore_bitmap_write_access(mddev->bitmap_info.file);
4534 fput(mddev->bitmap_info.file);
4535 mddev->bitmap_info.file = NULL;
4537 mddev->bitmap_info.offset = 0;
4539 /* make sure all md_delayed_delete calls have finished */
4540 flush_scheduled_work();
4542 export_array(mddev);
4544 mddev->array_sectors = 0;
4545 mddev->external_size = 0;
4546 mddev->dev_sectors = 0;
4547 mddev->raid_disks = 0;
4548 mddev->recovery_cp = 0;
4549 mddev->resync_min = 0;
4550 mddev->resync_max = MaxSector;
4551 mddev->reshape_position = MaxSector;
4552 mddev->external = 0;
4553 mddev->persistent = 0;
4554 mddev->level = LEVEL_NONE;
4555 mddev->clevel[0] = 0;
4558 mddev->metadata_type[0] = 0;
4559 mddev->chunk_sectors = 0;
4560 mddev->ctime = mddev->utime = 0;
4562 mddev->max_disks = 0;
4564 mddev->delta_disks = 0;
4565 mddev->new_level = LEVEL_NONE;
4566 mddev->new_layout = 0;
4567 mddev->new_chunk_sectors = 0;
4568 mddev->curr_resync = 0;
4569 mddev->resync_mismatches = 0;
4570 mddev->suspend_lo = mddev->suspend_hi = 0;
4571 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4572 mddev->recovery = 0;
4575 mddev->degraded = 0;
4576 mddev->barriers_work = 0;
4577 mddev->safemode = 0;
4578 mddev->bitmap_info.offset = 0;
4579 mddev->bitmap_info.default_offset = 0;
4580 mddev->bitmap_info.chunksize = 0;
4581 mddev->bitmap_info.daemon_sleep = 0;
4582 mddev->bitmap_info.max_write_behind = 0;
4583 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4584 if (mddev->hold_active == UNTIL_STOP)
4585 mddev->hold_active = 0;
4587 } else if (mddev->pers)
4588 printk(KERN_INFO "md: %s switched to read-only mode.\n",
4591 blk_integrity_unregister(disk);
4592 md_new_event(mddev);
4593 sysfs_notify_dirent(mddev->sysfs_state);
4598 static void autorun_array(mddev_t *mddev)
4603 if (list_empty(&mddev->disks))
4606 printk(KERN_INFO "md: running: ");
4608 list_for_each_entry(rdev, &mddev->disks, same_set) {
4609 char b[BDEVNAME_SIZE];
4610 printk("<%s>", bdevname(rdev->bdev,b));
4614 err = do_md_run(mddev);
4616 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4617 do_md_stop(mddev, 0, 0);
 * let's try to run arrays based on all disks that have arrived
4623 * until now. (those are in pending_raid_disks)
4625 * the method: pick the first pending disk, collect all disks with
4626 * the same UUID, remove all from the pending list and put them into
4627 * the 'same_array' list. Then order this list based on superblock
4628 * update time (freshest comes first), kick out 'old' disks and
4629 * compare superblocks. If everything's fine then run it.
4631 * If "unit" is allocated, then bump its reference count
4633 static void autorun_devices(int part)
4635 mdk_rdev_t *rdev0, *rdev, *tmp;
4637 char b[BDEVNAME_SIZE];
4639 printk(KERN_INFO "md: autorun ...\n");
4640 while (!list_empty(&pending_raid_disks)) {
4643 LIST_HEAD(candidates);
4644 rdev0 = list_entry(pending_raid_disks.next,
4645 mdk_rdev_t, same_set);
4647 printk(KERN_INFO "md: considering %s ...\n",
4648 bdevname(rdev0->bdev,b));
4649 INIT_LIST_HEAD(&candidates);
4650 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4651 if (super_90_load(rdev, rdev0, 0) >= 0) {
4652 printk(KERN_INFO "md: adding %s ...\n",
4653 bdevname(rdev->bdev,b));
4654 list_move(&rdev->same_set, &candidates);
4657 * now we have a set of devices, with all of them having
4658 * mostly sane superblocks. It's time to allocate the
4662 dev = MKDEV(mdp_major,
4663 rdev0->preferred_minor << MdpMinorShift);
4664 unit = MINOR(dev) >> MdpMinorShift;
4666 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4669 if (rdev0->preferred_minor != unit) {
4670 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4671 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4675 md_probe(dev, NULL, NULL);
4676 mddev = mddev_find(dev);
4677 if (!mddev || !mddev->gendisk) {
4681 "md: cannot allocate memory for md drive.\n");
4684 if (mddev_lock(mddev))
4685 printk(KERN_WARNING "md: %s locked, cannot run\n",
4687 else if (mddev->raid_disks || mddev->major_version
4688 || !list_empty(&mddev->disks)) {
4690 "md: %s already running, cannot run %s\n",
4691 mdname(mddev), bdevname(rdev0->bdev,b));
4692 mddev_unlock(mddev);
4694 printk(KERN_INFO "md: created %s\n", mdname(mddev));
4695 mddev->persistent = 1;
4696 rdev_for_each_list(rdev, tmp, &candidates) {
4697 list_del_init(&rdev->same_set);
4698 if (bind_rdev_to_array(rdev, mddev))
4701 autorun_array(mddev);
4702 mddev_unlock(mddev);
4704 /* on success, candidates will be empty, on error
4707 rdev_for_each_list(rdev, tmp, &candidates) {
4708 list_del_init(&rdev->same_set);
4713 printk(KERN_INFO "md: ... autorun DONE.\n");
4715 #endif /* !MODULE */
4717 static int get_version(void __user * arg)
4721 ver.major = MD_MAJOR_VERSION;
4722 ver.minor = MD_MINOR_VERSION;
4723 ver.patchlevel = MD_PATCHLEVEL_VERSION;
4725 if (copy_to_user(arg, &ver, sizeof(ver)))
4731 static int get_array_info(mddev_t * mddev, void __user * arg)
4733 mdu_array_info_t info;
4734 int nr,working,insync,failed,spare;
4737 nr=working=insync=failed=spare=0;
4738 list_for_each_entry(rdev, &mddev->disks, same_set) {
4740 if (test_bit(Faulty, &rdev->flags))
4744 if (test_bit(In_sync, &rdev->flags))
4751 info.major_version = mddev->major_version;
4752 info.minor_version = mddev->minor_version;
4753 info.patch_version = MD_PATCHLEVEL_VERSION;
4754 info.ctime = mddev->ctime;
4755 info.level = mddev->level;
4756 info.size = mddev->dev_sectors / 2;
4757 if (info.size != mddev->dev_sectors / 2) /* overflow */
4760 info.raid_disks = mddev->raid_disks;
4761 info.md_minor = mddev->md_minor;
4762 info.not_persistent= !mddev->persistent;
4764 info.utime = mddev->utime;
4767 info.state = (1<<MD_SB_CLEAN);
4768 if (mddev->bitmap && mddev->bitmap_info.offset)
4769 info.state = (1<<MD_SB_BITMAP_PRESENT);
4770 info.active_disks = insync;
4771 info.working_disks = working;
4772 info.failed_disks = failed;
4773 info.spare_disks = spare;
4775 info.layout = mddev->layout;
4776 info.chunk_size = mddev->chunk_sectors << 9;
4778 if (copy_to_user(arg, &info, sizeof(info)))
4784 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4786 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4787 char *ptr, *buf = NULL;
4790 if (md_allow_write(mddev))
4791 file = kmalloc(sizeof(*file), GFP_NOIO);
4793 file = kmalloc(sizeof(*file), GFP_KERNEL);
4798 /* bitmap disabled, zero the first byte and copy out */
4799 if (!mddev->bitmap || !mddev->bitmap->file) {
4800 file->pathname[0] = '\0';
4804 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4808 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4812 strcpy(file->pathname, ptr);
4816 if (copy_to_user(arg, file, sizeof(*file)))
4824 static int get_disk_info(mddev_t * mddev, void __user * arg)
4826 mdu_disk_info_t info;
4829 if (copy_from_user(&info, arg, sizeof(info)))
4832 rdev = find_rdev_nr(mddev, info.number);
4834 info.major = MAJOR(rdev->bdev->bd_dev);
4835 info.minor = MINOR(rdev->bdev->bd_dev);
4836 info.raid_disk = rdev->raid_disk;
4838 if (test_bit(Faulty, &rdev->flags))
4839 info.state |= (1<<MD_DISK_FAULTY);
4840 else if (test_bit(In_sync, &rdev->flags)) {
4841 info.state |= (1<<MD_DISK_ACTIVE);
4842 info.state |= (1<<MD_DISK_SYNC);
4844 if (test_bit(WriteMostly, &rdev->flags))
4845 info.state |= (1<<MD_DISK_WRITEMOSTLY);
4847 info.major = info.minor = 0;
4848 info.raid_disk = -1;
4849 info.state = (1<<MD_DISK_REMOVED);
4852 if (copy_to_user(arg, &info, sizeof(info)))
4858 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4860 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4862 dev_t dev = MKDEV(info->major,info->minor);
4864 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4867 if (!mddev->raid_disks) {
4869 /* expecting a device which has a superblock */
4870 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4873 "md: md_import_device returned %ld\n",
4875 return PTR_ERR(rdev);
4877 if (!list_empty(&mddev->disks)) {
4878 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4879 mdk_rdev_t, same_set);
4880 err = super_types[mddev->major_version]
4881 .load_super(rdev, rdev0, mddev->minor_version);
4884 "md: %s has different UUID to %s\n",
4885 bdevname(rdev->bdev,b),
4886 bdevname(rdev0->bdev,b2));
4891 err = bind_rdev_to_array(rdev, mddev);
4898 * add_new_disk can be used once the array is assembled
4899 * to add "hot spares". They must already have a superblock
4900 * written.
4901 */
4904 if (!mddev->pers->hot_add_disk) {
4906 "%s: personality does not support diskops!\n",
4910 if (mddev->persistent)
4911 rdev = md_import_device(dev, mddev->major_version,
4912 mddev->minor_version);
4914 rdev = md_import_device(dev, -1, -1);
4917 "md: md_import_device returned %ld\n",
4919 return PTR_ERR(rdev);
4921 /* set saved_raid_disk if appropriate */
4922 if (!mddev->persistent) {
4923 if (info->state & (1<<MD_DISK_SYNC) &&
4924 info->raid_disk < mddev->raid_disks)
4925 rdev->raid_disk = info->raid_disk;
4927 rdev->raid_disk = -1;
4929 super_types[mddev->major_version].
4930 validate_super(mddev, rdev);
4931 rdev->saved_raid_disk = rdev->raid_disk;
4933 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4934 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4935 set_bit(WriteMostly, &rdev->flags);
4937 clear_bit(WriteMostly, &rdev->flags);
4939 rdev->raid_disk = -1;
4940 err = bind_rdev_to_array(rdev, mddev);
4941 if (!err && !mddev->pers->hot_remove_disk) {
4942 /* If there is hot_add_disk but no hot_remove_disk
4943 * then added disks are for geometry changes,
4944 * and should be added immediately.
4945 */
4946 super_types[mddev->major_version].
4947 validate_super(mddev, rdev);
4948 err = mddev->pers->hot_add_disk(mddev, rdev);
4950 unbind_rdev_from_array(rdev);
4955 sysfs_notify_dirent(rdev->sysfs_state);
4957 md_update_sb(mddev, 1);
4958 if (mddev->degraded)
4959 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4960 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4961 md_wakeup_thread(mddev->thread);
4965 /* otherwise, add_new_disk is only allowed
4966 * for major_version==0 superblocks
4968 if (mddev->major_version != 0) {
4969 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4974 if (!(info->state & (1<<MD_DISK_FAULTY))) {
4976 rdev = md_import_device(dev, -1, 0);
4979 "md: error, md_import_device() returned %ld\n",
4981 return PTR_ERR(rdev);
4983 rdev->desc_nr = info->number;
4984 if (info->raid_disk < mddev->raid_disks)
4985 rdev->raid_disk = info->raid_disk;
4987 rdev->raid_disk = -1;
4989 if (rdev->raid_disk < mddev->raid_disks)
4990 if (info->state & (1<<MD_DISK_SYNC))
4991 set_bit(In_sync, &rdev->flags);
4993 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4994 set_bit(WriteMostly, &rdev->flags);
4996 if (!mddev->persistent) {
4997 printk(KERN_INFO "md: nonpersistent superblock ...\n");
4998 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5000 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5001 rdev->sectors = rdev->sb_start;
5003 err = bind_rdev_to_array(rdev, mddev);
5013 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
5015 char b[BDEVNAME_SIZE];
5018 rdev = find_rdev(mddev, dev);
5022 if (rdev->raid_disk >= 0)
5025 kick_rdev_from_array(rdev);
5026 md_update_sb(mddev, 1);
5027 md_new_event(mddev);
5031 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5032 bdevname(rdev->bdev,b), mdname(mddev));
5036 static int hot_add_disk(mddev_t * mddev, dev_t dev)
5038 char b[BDEVNAME_SIZE];
5045 if (mddev->major_version != 0) {
5046 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5047 " version-0 superblocks.\n",
5051 if (!mddev->pers->hot_add_disk) {
5053 "%s: personality does not support diskops!\n",
5058 rdev = md_import_device(dev, -1, 0);
5061 "md: error, md_import_device() returned %ld\n",
5066 if (mddev->persistent)
5067 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5069 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5071 rdev->sectors = rdev->sb_start;
5073 if (test_bit(Faulty, &rdev->flags)) {
5075 "md: can not hot-add faulty %s disk to %s!\n",
5076 bdevname(rdev->bdev,b), mdname(mddev));
5080 clear_bit(In_sync, &rdev->flags);
5082 rdev->saved_raid_disk = -1;
5083 err = bind_rdev_to_array(rdev, mddev);
5088 * The rest should better be atomic, we can have disk failures
5089 * noticed in interrupt contexts ...
5092 rdev->raid_disk = -1;
5094 md_update_sb(mddev, 1);
5097 * Kick recovery, maybe this spare has to be added to the
5098 * array immediately.
5100 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5101 md_wakeup_thread(mddev->thread);
5102 md_new_event(mddev);
5110 static int set_bitmap_file(mddev_t *mddev, int fd)
5115 if (!mddev->pers->quiesce)
5117 if (mddev->recovery || mddev->sync_thread)
5119 /* we should be able to change the bitmap.. */
5125 return -EEXIST; /* cannot add when bitmap is present */
5126 mddev->bitmap_info.file = fget(fd);
5128 if (mddev->bitmap_info.file == NULL) {
5129 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5134 err = deny_bitmap_write_access(mddev->bitmap_info.file);
5136 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5138 fput(mddev->bitmap_info.file);
5139 mddev->bitmap_info.file = NULL;
5142 mddev->bitmap_info.offset = 0; /* file overrides offset */
5143 } else if (mddev->bitmap == NULL)
5144 return -ENOENT; /* cannot remove what isn't there */
5147 mddev->pers->quiesce(mddev, 1);
5149 err = bitmap_create(mddev);
5150 if (fd < 0 || err) {
5151 bitmap_destroy(mddev);
5152 fd = -1; /* make sure to put the file */
5154 mddev->pers->quiesce(mddev, 0);
5157 if (mddev->bitmap_info.file) {
5158 restore_bitmap_write_access(mddev->bitmap_info.file);
5159 fput(mddev->bitmap_info.file);
5161 mddev->bitmap_info.file = NULL;
5168 * set_array_info is used in two different ways.
5169 * The original usage is when creating a new array.
5170 * In this usage, raid_disks is > 0 and it together with
5171 * level, size, not_persistent, layout and chunksize determine the
5172 * shape of the array.
5173 * This will always create an array with a type-0.90.0 superblock.
5174 * The newer usage is when assembling an array.
5175 * In this case raid_disks will be 0, and the major_version field is
5176 * used to determine which style super-blocks are to be found on the devices.
5177 * The minor and patch _version numbers are also kept in case the
5178 * super_block handler wishes to interpret them.
5179 */
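/*
 * Illustrative userspace sketch (not part of this file; the fd,
 * device path and sizes are assumptions) of the "create" usage
 * described above, via the SET_ARRAY_INFO ioctl from
 * <linux/raid/md_u.h>:
 *
 *	mdu_array_info_t info = { 0 };
 *	int fd = open("/dev/md0", O_RDWR);
 *	info.level = 1;
 *	info.raid_disks = 2;		(raid_disks > 0 selects "create")
 *	info.size = 1048576;		(per-device size in KB, example value)
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 * For the "assemble" usage, leave raid_disks == 0 and instead set
 * major_version (and minor/patch) to pick the superblock format.
 */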
5180 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5183 if (info->raid_disks == 0) {
5184 /* just setting version number for superblock loading */
5185 if (info->major_version < 0 ||
5186 info->major_version >= ARRAY_SIZE(super_types) ||
5187 super_types[info->major_version].name == NULL) {
5188 /* maybe try to auto-load a module? */
5190 "md: superblock version %d not known\n",
5191 info->major_version);
5194 mddev->major_version = info->major_version;
5195 mddev->minor_version = info->minor_version;
5196 mddev->patch_version = info->patch_version;
5197 mddev->persistent = !info->not_persistent;
5200 mddev->major_version = MD_MAJOR_VERSION;
5201 mddev->minor_version = MD_MINOR_VERSION;
5202 mddev->patch_version = MD_PATCHLEVEL_VERSION;
5203 mddev->ctime = get_seconds();
5205 mddev->level = info->level;
5206 mddev->clevel[0] = 0;
5207 mddev->dev_sectors = 2 * (sector_t)info->size;
5208 mddev->raid_disks = info->raid_disks;
5209 /* don't set md_minor, it is determined by which /dev/md* was
5210 * opened
5211 */
5212 if (info->state & (1<<MD_SB_CLEAN))
5213 mddev->recovery_cp = MaxSector;
5215 mddev->recovery_cp = 0;
5216 mddev->persistent = ! info->not_persistent;
5217 mddev->external = 0;
5219 mddev->layout = info->layout;
5220 mddev->chunk_sectors = info->chunk_size >> 9;
5222 mddev->max_disks = MD_SB_DISKS;
5224 if (mddev->persistent)
5226 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5228 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5229 mddev->bitmap_info.offset = 0;
5231 mddev->reshape_position = MaxSector;
5234 * Generate a 128 bit UUID
5236 get_random_bytes(mddev->uuid, 16);
5238 mddev->new_level = mddev->level;
5239 mddev->new_chunk_sectors = mddev->chunk_sectors;
5240 mddev->new_layout = mddev->layout;
5241 mddev->delta_disks = 0;
5246 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5248 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5250 if (mddev->external_size)
5253 mddev->array_sectors = array_sectors;
5255 EXPORT_SYMBOL(md_set_array_sectors);
5257 static int update_size(mddev_t *mddev, sector_t num_sectors)
5261 int fit = (num_sectors == 0);
5263 if (mddev->pers->resize == NULL)
5265 /* The "num_sectors" is the number of sectors of each device that
5266 * is used. This can only make sense for arrays with redundancy.
5267 * linear and raid0 always use whatever space is available. We can only
5268 * consider changing this number if no resync or reconstruction is
5269 * happening, and if the new size is acceptable. It must fit before the
5270 * sb_start or, if that is <data_offset, it must fit before the size
5271 * of each device. If num_sectors is zero, we find the largest size
5272 * that fits.
5273 */
5275 if (mddev->sync_thread)
5276 return -EBUSY;
5277 if (mddev->bitmap)
5278 /* Sorry, cannot grow a bitmap yet, just remove it,
5279 * grow, and re-add.
5280 */
5281 return -EBUSY;
5282 list_for_each_entry(rdev, &mddev->disks, same_set) {
5283 sector_t avail = rdev->sectors;
5285 if (fit && (num_sectors == 0 || num_sectors > avail))
5286 num_sectors = avail;
5287 if (avail < num_sectors)
5290 rv = mddev->pers->resize(mddev, num_sectors);
5292 revalidate_disk(mddev->gendisk);
5296 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5299 /* change the number of raid disks */
5300 if (mddev->pers->check_reshape == NULL)
5302 if (raid_disks <= 0 ||
5303 raid_disks >= mddev->max_disks)
5305 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5307 mddev->delta_disks = raid_disks - mddev->raid_disks;
5309 rv = mddev->pers->check_reshape(mddev);
5315 * update_array_info is used to change the configuration of an
5317 * The version, ctime, level, size, raid_disks, not_persistent, layout and
5318 * chunk_size fields in the info are checked against the array.
5319 * Any differences that cannot be handled will cause an error.
5320 * Normally, only one change can be managed at a time.
5321 */
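/*
 * Sketch of the "one change at a time" rule from userspace (values
 * assumed): to grow only the device size, resubmit the current info
 * with just that field changed,
 *
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.size = new_kb;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 * changing e.g. size and raid_disks in the same call is rejected
 * with -EINVAL.
 */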
5322 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5328 /* calculate expected state, ignoring low bits */
5329 if (mddev->bitmap && mddev->bitmap_info.offset)
5330 state |= (1 << MD_SB_BITMAP_PRESENT);
5332 if (mddev->major_version != info->major_version ||
5333 mddev->minor_version != info->minor_version ||
5334 /* mddev->patch_version != info->patch_version || */
5335 mddev->ctime != info->ctime ||
5336 mddev->level != info->level ||
5337 /* mddev->layout != info->layout || */
5338 !mddev->persistent != info->not_persistent||
5339 mddev->chunk_sectors != info->chunk_size >> 9 ||
5340 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5341 ((state^info->state) & 0xfffffe00)
5344 /* Check there is only one change */
5345 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5347 if (mddev->raid_disks != info->raid_disks)
5349 if (mddev->layout != info->layout)
5351 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5358 if (mddev->layout != info->layout) {
5360 * we don't need to do anything at the md level, the
5361 * personality will take care of it all.
5363 if (mddev->pers->check_reshape == NULL)
5366 mddev->new_layout = info->layout;
5367 rv = mddev->pers->check_reshape(mddev);
5369 mddev->new_layout = mddev->layout;
5373 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5374 rv = update_size(mddev, (sector_t)info->size * 2);
5376 if (mddev->raid_disks != info->raid_disks)
5377 rv = update_raid_disks(mddev, info->raid_disks);
5379 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5380 if (mddev->pers->quiesce == NULL)
5382 if (mddev->recovery || mddev->sync_thread)
5384 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5385 /* add the bitmap */
5388 if (mddev->bitmap_info.default_offset == 0)
5390 mddev->bitmap_info.offset =
5391 mddev->bitmap_info.default_offset;
5392 mddev->pers->quiesce(mddev, 1);
5393 rv = bitmap_create(mddev);
5395 bitmap_destroy(mddev);
5396 mddev->pers->quiesce(mddev, 0);
5398 /* remove the bitmap */
5401 if (mddev->bitmap->file)
5403 mddev->pers->quiesce(mddev, 1);
5404 bitmap_destroy(mddev);
5405 mddev->pers->quiesce(mddev, 0);
5406 mddev->bitmap_info.offset = 0;
5409 md_update_sb(mddev, 1);
5413 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5417 if (mddev->pers == NULL)
5420 rdev = find_rdev(mddev, dev);
5424 md_error(mddev, rdev);
5429 * We have a problem here: there is no easy way to give a CHS
5430 * virtual geometry. We currently pretend that we have a 2 heads
5431 * 4 sectors (with a BIG number of cylinders...). This drives
5432 * dosfs just mad... ;-)
5433 */
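/*
 * Worked example (numbers illustrative): a 1 GiB array has
 * get_capacity() == 2097152 sectors, so with the fake geometry of
 * 2 heads and 4 sectors per track we report 2097152 / 8 == 262144
 * cylinders.
 */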
5434 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5436 mddev_t *mddev = bdev->bd_disk->private_data;
5440 geo->cylinders = get_capacity(mddev->gendisk) / 8;
5444 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5445 unsigned int cmd, unsigned long arg)
5448 void __user *argp = (void __user *)arg;
5449 mddev_t *mddev = NULL;
5451 if (!capable(CAP_SYS_ADMIN))
5455 * Commands dealing with the RAID driver but not any
5461 err = get_version(argp);
5464 case PRINT_RAID_DEBUG:
5472 autostart_arrays(arg);
5479 * Commands creating/starting a new array:
5482 mddev = bdev->bd_disk->private_data;
5489 err = mddev_lock(mddev);
5492 "md: ioctl lock interrupted, reason %d, cmd %d\n",
5499 case SET_ARRAY_INFO:
5501 mdu_array_info_t info;
5503 memset(&info, 0, sizeof(info));
5504 else if (copy_from_user(&info, argp, sizeof(info))) {
5509 err = update_array_info(mddev, &info);
5511 printk(KERN_WARNING "md: couldn't update"
5512 " array info. %d\n", err);
5517 if (!list_empty(&mddev->disks)) {
5519 "md: array %s already has disks!\n",
5524 if (mddev->raid_disks) {
5526 "md: array %s already initialised!\n",
5531 err = set_array_info(mddev, &info);
5533 printk(KERN_WARNING "md: couldn't set"
5534 " array info. %d\n", err);
5544 * Commands querying/configuring an existing array:
5546 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5547 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5548 if ((!mddev->raid_disks && !mddev->external)
5549 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5550 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5551 && cmd != GET_BITMAP_FILE) {
5557 * Commands even a read-only array can execute:
5561 case GET_ARRAY_INFO:
5562 err = get_array_info(mddev, argp);
5565 case GET_BITMAP_FILE:
5566 err = get_bitmap_file(mddev, argp);
5570 err = get_disk_info(mddev, argp);
5573 case RESTART_ARRAY_RW:
5574 err = restart_array(mddev);
5578 err = do_md_stop(mddev, 0, 1);
5582 err = do_md_stop(mddev, 1, 1);
5588 * The remaining ioctls are changing the state of the
5589 * superblock, so we do not allow them on read-only arrays.
5590 * However non-MD ioctls (e.g. get-size) will still come through
5591 * here and hit the 'default' below, so only disallow
5592 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5594 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5595 if (mddev->ro == 2) {
5597 sysfs_notify_dirent(mddev->sysfs_state);
5598 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5599 md_wakeup_thread(mddev->thread);
5610 mdu_disk_info_t info;
5611 if (copy_from_user(&info, argp, sizeof(info)))
5614 err = add_new_disk(mddev, &info);
5618 case HOT_REMOVE_DISK:
5619 err = hot_remove_disk(mddev, new_decode_dev(arg));
5623 err = hot_add_disk(mddev, new_decode_dev(arg));
5626 case SET_DISK_FAULTY:
5627 err = set_disk_faulty(mddev, new_decode_dev(arg));
5631 err = do_md_run(mddev);
5634 case SET_BITMAP_FILE:
5635 err = set_bitmap_file(mddev, (int)arg);
5645 if (mddev->hold_active == UNTIL_IOCTL &&
5647 mddev->hold_active = 0;
5648 mddev_unlock(mddev);
5658 static int md_open(struct block_device *bdev, fmode_t mode)
5661 * Succeed if we can lock the mddev, which confirms that
5662 * it isn't being stopped right now.
5664 mddev_t *mddev = mddev_find(bdev->bd_dev);
5667 if (mddev->gendisk != bdev->bd_disk) {
5668 /* we are racing with mddev_put which is discarding this
5672 /* Wait until bdev->bd_disk is definitely gone */
5673 flush_scheduled_work();
5674 /* Then retry the open from the top */
5675 return -ERESTARTSYS;
5677 BUG_ON(mddev != bdev->bd_disk->private_data);
5679 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5683 atomic_inc(&mddev->openers);
5684 mutex_unlock(&mddev->open_mutex);
5686 check_disk_change(bdev);
5691 static int md_release(struct gendisk *disk, fmode_t mode)
5693 mddev_t *mddev = disk->private_data;
5696 atomic_dec(&mddev->openers);
5702 static int md_media_changed(struct gendisk *disk)
5704 mddev_t *mddev = disk->private_data;
5706 return mddev->changed;
5709 static int md_revalidate(struct gendisk *disk)
5711 mddev_t *mddev = disk->private_data;
5716 static const struct block_device_operations md_fops =
5718 .owner = THIS_MODULE,
5720 .release = md_release,
5722 .getgeo = md_getgeo,
5723 .media_changed = md_media_changed,
5724 .revalidate_disk= md_revalidate,
5727 static int md_thread(void * arg)
5729 mdk_thread_t *thread = arg;
5732 * md_thread is a 'system-thread'; its priority should be very
5733 * high. We avoid resource deadlocks individually in each
5734 * raid personality. (RAID5 does preallocation) We also use RR and
5735 * the very same RT priority as kswapd, thus we will never get
5736 * into a priority inversion deadlock.
5738 * we definitely have to have equal or higher priority than
5739 * bdflush, otherwise bdflush will deadlock if there are too
5740 * many dirty RAID5 blocks.
5743 allow_signal(SIGKILL);
5744 while (!kthread_should_stop()) {
5746 /* We need to wait INTERRUPTIBLE so that
5747 * we don't add to the load-average.
5748 * That means we need to be sure no signals are
5749 * pending.
5750 */
5751 if (signal_pending(current))
5752 flush_signals(current);
5754 wait_event_interruptible_timeout
5756 test_bit(THREAD_WAKEUP, &thread->flags)
5757 || kthread_should_stop(),
5760 clear_bit(THREAD_WAKEUP, &thread->flags);
5762 thread->run(thread->mddev);
5768 void md_wakeup_thread(mdk_thread_t *thread)
5771 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5772 set_bit(THREAD_WAKEUP, &thread->flags);
5773 wake_up(&thread->wqueue);
5777 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5780 mdk_thread_t *thread;
5782 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5786 init_waitqueue_head(&thread->wqueue);
5789 thread->mddev = mddev;
5790 thread->timeout = MAX_SCHEDULE_TIMEOUT;
5791 thread->tsk = kthread_run(md_thread, thread,
5793 mdname(thread->mddev),
5794 name ?: mddev->pers->name);
5795 if (IS_ERR(thread->tsk)) {
5802 void md_unregister_thread(mdk_thread_t *thread)
5806 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5808 kthread_stop(thread->tsk);
5812 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5819 if (!rdev || test_bit(Faulty, &rdev->flags))
5822 if (mddev->external)
5823 set_bit(Blocked, &rdev->flags);
5825 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5827 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5828 __builtin_return_address(0),__builtin_return_address(1),
5829 __builtin_return_address(2),__builtin_return_address(3));
5833 if (!mddev->pers->error_handler)
5835 mddev->pers->error_handler(mddev,rdev);
5836 if (mddev->degraded)
5837 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5838 set_bit(StateChanged, &rdev->flags);
5839 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5840 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5841 md_wakeup_thread(mddev->thread);
5842 md_new_event_inintr(mddev);
5845 /* seq_file implementation /proc/mdstat */
5847 static void status_unused(struct seq_file *seq)
5852 seq_printf(seq, "unused devices: ");
5854 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
5855 char b[BDEVNAME_SIZE];
5857 seq_printf(seq, "%s ",
5858 bdevname(rdev->bdev,b));
5861 seq_printf(seq, "<none>");
5863 seq_printf(seq, "\n");
5867 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5869 sector_t max_sectors, resync, res;
5870 unsigned long dt, db;
5873 unsigned int per_milli;
5875 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
5877 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5878 max_sectors = mddev->resync_max_sectors;
5880 max_sectors = mddev->dev_sectors;
5883 * Should not happen.
5889 /* Pick 'scale' such that (resync>>scale)*1000 will fit
5890 * in a sector_t, and (max_sectors>>scale) will fit in a
5891 * u32, as those are the requirements for sector_div.
5892 * Thus 'scale' must be at least 10
5893 */
5894 scale = 10;
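/* Example (sizes assumed): with max_sectors == 2^45 the loop below
 * raises 'scale' from 10 to 12 (2^45/2 exceeds 2^42 and 2^43 but
 * not 2^44), keeping (resync>>scale)*1000 well inside a sector_t;
 * smaller arrays leave scale at 10.
 */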
5895 if (sizeof(sector_t) > sizeof(unsigned long)) {
5896 while ( max_sectors/2 > (1ULL<<(scale+32)))
5899 res = (resync>>scale)*1000;
5900 sector_div(res, (u32)((max_sectors>>scale)+1));
5904 int i, x = per_milli/50, y = 20-x;
5905 seq_printf(seq, "[");
5906 for (i = 0; i < x; i++)
5907 seq_printf(seq, "=");
5908 seq_printf(seq, ">");
5909 for (i = 0; i < y; i++)
5910 seq_printf(seq, ".");
5911 seq_printf(seq, "] ");
5913 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5914 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5916 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5918 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5919 "resync" : "recovery"))),
5920 per_milli/10, per_milli % 10,
5921 (unsigned long long) resync/2,
5922 (unsigned long long) max_sectors/2);
5925 * dt: time from mark until now
5926 * db: blocks written from mark until now
5927 * rt: remaining time
5929 * rt is a sector_t, so could be 32bit or 64bit.
5930 * So we divide before multiply in case it is 32bit and close
5931 * to the limit.
5932 * We scale the divisor (db) by 32 to avoid losing precision
5933 * near the end of resync when the number of remaining sectors
5934 * is close to the 'max' sectors.
5935 * We then divide rt by 32 after multiplying by db to compensate.
5936 * The '+1' avoids division by zero if db is very small.
5937 */
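/* Worked example (figures assumed): dt == 100s, db == 200000
 * sectors since the mark and 10000000 sectors remaining give
 * rt = 10000000 / (200000/32 + 1) == 1599; the multiply and shift
 * below (rt *= dt; rt >>= 5) then yield ~4996 seconds, close to
 * the exact remaining/rate answer of 5000s.
 */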
5938 dt = ((jiffies - mddev->resync_mark) / HZ);
5939 if (!dt) dt++;
5940 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5941 - mddev->resync_mark_cnt;
5943 rt = max_sectors - resync; /* number of remaining sectors */
5944 sector_div(rt, db/32+1);
5945 rt *= dt;
5946 rt >>= 5;
5948 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
5949 ((unsigned long)rt % 60)/6);
5951 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5954 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5956 struct list_head *tmp;
5966 spin_lock(&all_mddevs_lock);
5967 list_for_each(tmp,&all_mddevs)
5969 mddev = list_entry(tmp, mddev_t, all_mddevs);
5971 spin_unlock(&all_mddevs_lock);
5974 spin_unlock(&all_mddevs_lock);
5976 return (void*)2; /* tail */
5980 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5982 struct list_head *tmp;
5983 mddev_t *next_mddev, *mddev = v;
5989 spin_lock(&all_mddevs_lock);
5991 tmp = all_mddevs.next;
5993 tmp = mddev->all_mddevs.next;
5994 if (tmp != &all_mddevs)
5995 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5997 next_mddev = (void*)2;
6000 spin_unlock(&all_mddevs_lock);
6008 static void md_seq_stop(struct seq_file *seq, void *v)
6012 if (mddev && v != (void*)1 && v != (void*)2)
6016 struct mdstat_info {
6020 static int md_seq_show(struct seq_file *seq, void *v)
6025 struct mdstat_info *mi = seq->private;
6026 struct bitmap *bitmap;
6028 if (v == (void*)1) {
6029 struct mdk_personality *pers;
6030 seq_printf(seq, "Personalities : ");
6031 spin_lock(&pers_lock);
6032 list_for_each_entry(pers, &pers_list, list)
6033 seq_printf(seq, "[%s] ", pers->name);
6035 spin_unlock(&pers_lock);
6036 seq_printf(seq, "\n");
6037 mi->event = atomic_read(&md_event_count);
6040 if (v == (void*)2) {
6045 if (mddev_lock(mddev) < 0)
6048 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6049 seq_printf(seq, "%s : %sactive", mdname(mddev),
6050 mddev->pers ? "" : "in");
6053 seq_printf(seq, " (read-only)");
6055 seq_printf(seq, " (auto-read-only)");
6056 seq_printf(seq, " %s", mddev->pers->name);
6060 list_for_each_entry(rdev, &mddev->disks, same_set) {
6061 char b[BDEVNAME_SIZE];
6062 seq_printf(seq, " %s[%d]",
6063 bdevname(rdev->bdev,b), rdev->desc_nr);
6064 if (test_bit(WriteMostly, &rdev->flags))
6065 seq_printf(seq, "(W)");
6066 if (test_bit(Faulty, &rdev->flags)) {
6067 seq_printf(seq, "(F)");
6069 } else if (rdev->raid_disk < 0)
6070 seq_printf(seq, "(S)"); /* spare */
6071 sectors += rdev->sectors;
6074 if (!list_empty(&mddev->disks)) {
6076 seq_printf(seq, "\n %llu blocks",
6077 (unsigned long long)
6078 mddev->array_sectors / 2);
6080 seq_printf(seq, "\n %llu blocks",
6081 (unsigned long long)sectors / 2);
6083 if (mddev->persistent) {
6084 if (mddev->major_version != 0 ||
6085 mddev->minor_version != 90) {
6086 seq_printf(seq," super %d.%d",
6087 mddev->major_version,
6088 mddev->minor_version);
6090 } else if (mddev->external)
6091 seq_printf(seq, " super external:%s",
6092 mddev->metadata_type);
6094 seq_printf(seq, " super non-persistent");
6097 mddev->pers->status(seq, mddev);
6098 seq_printf(seq, "\n ");
6099 if (mddev->pers->sync_request) {
6100 if (mddev->curr_resync > 2) {
6101 status_resync(seq, mddev);
6102 seq_printf(seq, "\n ");
6103 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
6104 seq_printf(seq, "\tresync=DELAYED\n ");
6105 else if (mddev->recovery_cp < MaxSector)
6106 seq_printf(seq, "\tresync=PENDING\n ");
6109 seq_printf(seq, "\n ");
6111 if ((bitmap = mddev->bitmap)) {
6112 unsigned long chunk_kb;
6113 unsigned long flags;
6114 spin_lock_irqsave(&bitmap->lock, flags);
6115 chunk_kb = mddev->bitmap_info.chunksize >> 10;
6116 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
6118 bitmap->pages - bitmap->missing_pages,
6120 (bitmap->pages - bitmap->missing_pages)
6121 << (PAGE_SHIFT - 10),
6122 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
6123 chunk_kb ? "KB" : "B");
6125 seq_printf(seq, ", file: ");
6126 seq_path(seq, &bitmap->file->f_path, " \t\n");
6129 seq_printf(seq, "\n");
6130 spin_unlock_irqrestore(&bitmap->lock, flags);
6133 seq_printf(seq, "\n");
6135 mddev_unlock(mddev);
6140 static const struct seq_operations md_seq_ops = {
6141 .start = md_seq_start,
6142 .next = md_seq_next,
6143 .stop = md_seq_stop,
6144 .show = md_seq_show,
6147 static int md_seq_open(struct inode *inode, struct file *file)
6150 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6154 error = seq_open(file, &md_seq_ops);
6158 struct seq_file *p = file->private_data;
6160 mi->event = atomic_read(&md_event_count);
6165 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6167 struct seq_file *m = filp->private_data;
6168 struct mdstat_info *mi = m->private;
6171 poll_wait(filp, &md_event_waiters, wait);
6173 /* always allow read */
6174 mask = POLLIN | POLLRDNORM;
6176 if (mi->event != atomic_read(&md_event_count))
6177 mask |= POLLERR | POLLPRI;
6181 static const struct file_operations md_seq_fops = {
6182 .owner = THIS_MODULE,
6183 .open = md_seq_open,
6185 .llseek = seq_lseek,
6186 .release = seq_release_private,
6187 .poll = mdstat_poll,
6190 int register_md_personality(struct mdk_personality *p)
6192 spin_lock(&pers_lock);
6193 list_add_tail(&p->list, &pers_list);
6194 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6195 spin_unlock(&pers_lock);
6199 int unregister_md_personality(struct mdk_personality *p)
6201 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6202 spin_lock(&pers_lock);
6203 list_del_init(&p->list);
6204 spin_unlock(&pers_lock);
6208 static int is_mddev_idle(mddev_t *mddev, int init)
6216 rdev_for_each_rcu(rdev, mddev) {
6217 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6218 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6219 (int)part_stat_read(&disk->part0, sectors[1]) -
6220 atomic_read(&disk->sync_io);
6221 /* sync IO will cause sync_io to increase before the disk_stats
6222 * as sync_io is counted when a request starts, and
6223 * disk_stats is counted when it completes.
6224 * So resync activity will cause curr_events to be smaller than
6225 * when there was no such activity.
6226 * non-sync IO will cause disk_stat to increase without
6227 * increasing sync_io so curr_events will (eventually)
6228 * be larger than it was before. Once it becomes
6229 * substantially larger, the test below will cause
6230 * the array to appear non-idle, and resync will slow
6232 * If there is a lot of outstanding resync activity when
6233 * we set last_event to curr_events, then all that activity
6234 * completing might cause the array to appear non-idle
6235 * and resync will be slowed down even though there might
6236 * not have been non-resync activity. This will only
6237 * happen once though. 'last_events' will soon reflect
6238 * the state where there is little or no outstanding
6239 * resync requests, and further resync activity will
6240 * always make curr_events less than last_events.
6241 */
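/* Illustrative numbers (assumed): a resync that has issued 1024
 * sectors not yet visible in disk_stats leaves curr_events 1024
 * below last_events, so the disk still counts as idle; ordinary
 * writes pushing curr_events more than 64 above last_events make
 * the array appear non-idle.
 */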
6243 if (init || curr_events - rdev->last_events > 64) {
6244 rdev->last_events = curr_events;
6252 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6254 /* another "blocks" (512-byte) blocks have been synced */
6255 atomic_sub(blocks, &mddev->recovery_active);
6256 wake_up(&mddev->recovery_wait);
6258 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6259 md_wakeup_thread(mddev->thread);
6260 /* stop recovery, signal do_sync .... */
6265 /* md_write_start(mddev, bi)
6266 * If we need to update some array metadata (e.g. 'active' flag
6267 * in superblock) before writing, schedule a superblock update
6268 * and wait for it to complete.
6269 */
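/*
 * Typical calling pattern in a personality's request path (a
 * sketch only; raid1's request handling is the familiar example):
 *
 *	md_write_start(mddev, bio);	(may block for a sb update)
 *	... submit the write to the member devices ...
 *	md_write_end(mddev);		(once the write completes)
 */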
6270 void md_write_start(mddev_t *mddev, struct bio *bi)
6273 if (bio_data_dir(bi) != WRITE)
6276 BUG_ON(mddev->ro == 1);
6277 if (mddev->ro == 2) {
6278 /* need to switch to read/write */
6280 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6281 md_wakeup_thread(mddev->thread);
6282 md_wakeup_thread(mddev->sync_thread);
6285 atomic_inc(&mddev->writes_pending);
6286 if (mddev->safemode == 1)
6287 mddev->safemode = 0;
6288 if (mddev->in_sync) {
6289 spin_lock_irq(&mddev->write_lock);
6290 if (mddev->in_sync) {
6292 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6293 md_wakeup_thread(mddev->thread);
6296 spin_unlock_irq(&mddev->write_lock);
6299 sysfs_notify_dirent(mddev->sysfs_state);
6300 wait_event(mddev->sb_wait,
6301 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6302 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6305 void md_write_end(mddev_t *mddev)
6307 if (atomic_dec_and_test(&mddev->writes_pending)) {
6308 if (mddev->safemode == 2)
6309 md_wakeup_thread(mddev->thread);
6310 else if (mddev->safemode_delay)
6311 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
6315 /* md_allow_write(mddev)
6316 * Calling this ensures that the array is marked 'active' so that writes
6317 * may proceed without blocking. It is important to call this before
6318 * attempting a GFP_KERNEL allocation while holding the mddev lock.
6319 * Must be called with mddev_lock held.
6321 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
6322 * is dropped, so return -EAGAIN after notifying userspace.
6323 */
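/*
 * Usage sketch, mirroring get_bitmap_file() earlier in this file:
 * a caller that cannot handle -EAGAIN falls back to a more
 * restricted allocation instead,
 *
 *	if (md_allow_write(mddev))
 *		buf = kmalloc(size, GFP_NOIO);
 *	else
 *		buf = kmalloc(size, GFP_KERNEL);
 */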
6324 int md_allow_write(mddev_t *mddev)
6330 if (!mddev->pers->sync_request)
6333 spin_lock_irq(&mddev->write_lock);
6334 if (mddev->in_sync) {
6336 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6337 if (mddev->safemode_delay &&
6338 mddev->safemode == 0)
6339 mddev->safemode = 1;
6340 spin_unlock_irq(&mddev->write_lock);
6341 md_update_sb(mddev, 0);
6342 sysfs_notify_dirent(mddev->sysfs_state);
6344 spin_unlock_irq(&mddev->write_lock);
6346 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6351 EXPORT_SYMBOL_GPL(md_allow_write);
6353 #define SYNC_MARKS 10
6354 #define SYNC_MARK_STEP (3*HZ)
6355 void md_do_sync(mddev_t *mddev)
6358 unsigned int currspeed = 0,
6360 sector_t max_sectors,j, io_sectors;
6361 unsigned long mark[SYNC_MARKS];
6362 sector_t mark_cnt[SYNC_MARKS];
6364 struct list_head *tmp;
6365 sector_t last_check;
6370 /* just in case thread restarts... */
6371 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6373 if (mddev->ro) /* never try to sync a read-only array */
6376 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6377 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6378 desc = "data-check";
6379 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6380 desc = "requested-resync";
6383 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6388 /* we overload curr_resync somewhat here.
6389 * 0 == not engaged in resync at all
6390 * 2 == checking that there is no conflict with another sync
6391 * 1 == like 2, but have yielded to allow conflicting resync to
6392 * pass
6393 * other == active in resync - this many blocks
6395 * Before starting a resync we must have set curr_resync to
6396 * 2, and then checked that every "conflicting" array has curr_resync
6397 * less than ours. When we find one that is the same or higher
6398 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
6399 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6400 * This will mean we have to start checking from the beginning again.
6401 */
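/* Example scenario (addresses illustrative): arrays A and B share a
 * physical disk and both reach curr_resync == 2; A, having the
 * lower mddev address, yields by dropping to 1 and waiting on
 * resync_wait while B proceeds; once B finishes, A starts its
 * conflict check over again.
 */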
6405 mddev->curr_resync = 2;
6408 if (kthread_should_stop()) {
6409 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6412 for_each_mddev(mddev2, tmp) {
6413 if (mddev2 == mddev)
6415 if (!mddev->parallel_resync
6416 && mddev2->curr_resync
6417 && match_mddev_units(mddev, mddev2)) {
6419 if (mddev < mddev2 && mddev->curr_resync == 2) {
6420 /* arbitrarily yield */
6421 mddev->curr_resync = 1;
6422 wake_up(&resync_wait);
6424 if (mddev > mddev2 && mddev->curr_resync == 1)
6425 /* no need to wait here, we can wait the next
6426 * time 'round when curr_resync == 2
6429 /* We need to wait 'interruptible' so as not to
6430 * contribute to the load average, and not to
6431 * be caught by 'softlockup'
6433 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6434 if (!kthread_should_stop() &&
6435 mddev2->curr_resync >= mddev->curr_resync) {
6436 printk(KERN_INFO "md: delaying %s of %s"
6437 " until %s has finished (they"
6438 " share one or more physical units)\n",
6439 desc, mdname(mddev), mdname(mddev2));
6441 if (signal_pending(current))
6442 flush_signals(current);
6444 finish_wait(&resync_wait, &wq);
6447 finish_wait(&resync_wait, &wq);
6450 } while (mddev->curr_resync < 2);
6453 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6454 /* resync follows the size requested by the personality,
6455 * which defaults to physical size, but can be virtual size
6457 max_sectors = mddev->resync_max_sectors;
6458 mddev->resync_mismatches = 0;
6459 /* we don't use the checkpoint if there's a bitmap */
6460 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6461 j = mddev->resync_min;
6462 else if (!mddev->bitmap)
6463 j = mddev->recovery_cp;
6465 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6466 max_sectors = mddev->dev_sectors;
6468 /* recovery follows the physical size of devices */
6469 max_sectors = mddev->dev_sectors;
6471 list_for_each_entry(rdev, &mddev->disks, same_set)
6472 if (rdev->raid_disk >= 0 &&
6473 !test_bit(Faulty, &rdev->flags) &&
6474 !test_bit(In_sync, &rdev->flags) &&
6475 rdev->recovery_offset < j)
6476 j = rdev->recovery_offset;
6479 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6480 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
6481 " %d KB/sec/disk.\n", speed_min(mddev));
6482 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6483 "(but not more than %d KB/sec) for %s.\n",
6484 speed_max(mddev), desc);
6486 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6489 for (m = 0; m < SYNC_MARKS; m++) {
6491 mark_cnt[m] = io_sectors;
6494 mddev->resync_mark = mark[last_mark];
6495 mddev->resync_mark_cnt = mark_cnt[last_mark];
6498 * Tune reconstruction:
6500 window = 32*(PAGE_SIZE/512);
6501 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6502 window/2,(unsigned long long) max_sectors/2);
6504 atomic_set(&mddev->recovery_active, 0);
6509 "md: resuming %s of %s from checkpoint.\n",
6510 desc, mdname(mddev));
6511 mddev->curr_resync = j;
6513 mddev->curr_resync_completed = mddev->curr_resync;
6515 while (j < max_sectors) {
6520 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6521 ((mddev->curr_resync > mddev->curr_resync_completed &&
6522 (mddev->curr_resync - mddev->curr_resync_completed)
6523 > (max_sectors >> 4)) ||
6524 (j - mddev->curr_resync_completed)*2
6525 >= mddev->resync_max - mddev->curr_resync_completed
6527 /* time to update curr_resync_completed */
6528 blk_unplug(mddev->queue);
6529 wait_event(mddev->recovery_wait,
6530 atomic_read(&mddev->recovery_active) == 0);
6531 mddev->curr_resync_completed =
6533 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6534 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6537 while (j >= mddev->resync_max && !kthread_should_stop()) {
6538 /* As this condition is controlled by user-space,
6539 * we can block indefinitely, so use '_interruptible'
6540 * to avoid triggering warnings.
6542 flush_signals(current); /* just in case */
6543 wait_event_interruptible(mddev->recovery_wait,
6544 mddev->resync_max > j
6545 || kthread_should_stop());
6548 if (kthread_should_stop())
6551 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6552 currspeed < speed_min(mddev));
6554 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6558 if (!skipped) { /* actual IO requested */
6559 io_sectors += sectors;
6560 atomic_add(sectors, &mddev->recovery_active);
6564 if (j>1) mddev->curr_resync = j;
6565 mddev->curr_mark_cnt = io_sectors;
6566 if (last_check == 0)
6567 /* this is the earliest that rebuild will be
6568 * visible in /proc/mdstat
6569 */
6570 md_new_event(mddev);
6572 if (last_check + window > io_sectors || j == max_sectors)
6575 last_check = io_sectors;
6577 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6581 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6583 int next = (last_mark+1) % SYNC_MARKS;
6585 mddev->resync_mark = mark[next];
6586 mddev->resync_mark_cnt = mark_cnt[next];
6587 mark[next] = jiffies;
6588 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6593 if (kthread_should_stop())
6598 * this loop exits only when we are slower than
6599 * the 'hard' speed limit, or the system has been IO-idle for
6600 * a short while.
6601 * the system might be non-idle CPU-wise, but we only care
6602 * about not overloading the IO subsystem. (things like an
6603 * e2fsck being done on the RAID array should execute fast)
6605 blk_unplug(mddev->queue);
6608 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6609 /((jiffies-mddev->resync_mark)/HZ +1) +1;
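/* e.g. (figures assumed) 60000 sectors since the last mark over 9
 * elapsed seconds gives currspeed = 60000/2 / (9+1) + 1 == 3001
 * KB/sec, which is then compared against speed_min()/speed_max().
 */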
6611 if (currspeed > speed_min(mddev)) {
6612 if ((currspeed > speed_max(mddev)) ||
6613 !is_mddev_idle(mddev, 0)) {
6619 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6621 * this also signals 'finished resyncing' to md_stop
6624 blk_unplug(mddev->queue);
6626 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6628 /* tell personality that we are finished */
6629 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6631 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6632 mddev->curr_resync > 2) {
6633 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6634 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6635 if (mddev->curr_resync >= mddev->recovery_cp) {
6637 "md: checkpointing %s of %s.\n",
6638 desc, mdname(mddev));
6639 mddev->recovery_cp = mddev->curr_resync;
6642 mddev->recovery_cp = MaxSector;
6644 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6645 mddev->curr_resync = MaxSector;
6646 list_for_each_entry(rdev, &mddev->disks, same_set)
6647 if (rdev->raid_disk >= 0 &&
6648 !test_bit(Faulty, &rdev->flags) &&
6649 !test_bit(In_sync, &rdev->flags) &&
6650 rdev->recovery_offset < mddev->curr_resync)
6651 rdev->recovery_offset = mddev->curr_resync;
6654 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6657 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6658 /* We completed so min/max setting can be forgotten if used. */
6659 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6660 mddev->resync_min = 0;
6661 mddev->resync_max = MaxSector;
6662 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6663 mddev->resync_min = mddev->curr_resync_completed;
6664 mddev->curr_resync = 0;
6665 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6666 mddev->curr_resync_completed = 0;
6667 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6668 wake_up(&resync_wait);
6669 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6670 md_wakeup_thread(mddev->thread);
6675 * got a signal, exit.
6678 "md: md_do_sync() got signal ... exiting\n");
6679 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6683 EXPORT_SYMBOL_GPL(md_do_sync);
6686 static int remove_and_add_spares(mddev_t *mddev)
6691 mddev->curr_resync_completed = 0;
6693 list_for_each_entry(rdev, &mddev->disks, same_set)
6694 if (rdev->raid_disk >= 0 &&
6695 !test_bit(Blocked, &rdev->flags) &&
6696 (test_bit(Faulty, &rdev->flags) ||
6697 ! test_bit(In_sync, &rdev->flags)) &&
6698 atomic_read(&rdev->nr_pending)==0) {
6699 if (mddev->pers->hot_remove_disk(
6700 mddev, rdev->raid_disk)==0) {
6702 sprintf(nm,"rd%d", rdev->raid_disk);
6703 sysfs_remove_link(&mddev->kobj, nm);
6704 rdev->raid_disk = -1;
6708 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6709 list_for_each_entry(rdev, &mddev->disks, same_set) {
6710 if (rdev->raid_disk >= 0 &&
6711 !test_bit(In_sync, &rdev->flags) &&
6712 !test_bit(Blocked, &rdev->flags))
6714 if (rdev->raid_disk < 0
6715 && !test_bit(Faulty, &rdev->flags)) {
6716 rdev->recovery_offset = 0;
6718 hot_add_disk(mddev, rdev) == 0) {
6720 sprintf(nm, "rd%d", rdev->raid_disk);
6721 if (sysfs_create_link(&mddev->kobj,
6724 "md: cannot register "
6728 md_new_event(mddev);
6737 * This routine is regularly called by all per-raid-array threads to
6738 * deal with generic issues like resync and super-block update.
6739 * Raid personalities that don't have a thread (linear/raid0) do not
6740 * need this as they never do any recovery or update the superblock.
6742 * It does not do any resync itself, but rather "forks" off other threads
6743 * to do that as needed.
6744 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6745 * "->recovery" and create a thread at ->sync_thread.
6746 * When the thread finishes it sets MD_RECOVERY_DONE
6747 * and wakes up this thread, which will reap the thread and finish up.
6748 * This thread also removes any faulty devices (with nr_pending == 0).
6750 * The overall approach is:
6751 * 1/ if the superblock needs updating, update it.
6752 * 2/ If a recovery thread is running, don't do anything else.
6753 * 3/ If recovery has finished, clean up, possibly marking spares active.
6754 * 4/ If there are any faulty devices, remove them.
6755 * 5/ If array is degraded, try to add spare devices
6756 * 6/ If array has spares or is not in-sync, start a resync thread.
6757 */
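/*
 * The expected caller is each personality's main thread, e.g. (a
 * sketch of the shape, not the exact raid1 code):
 *
 *	static void raid1d(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		... then service the personality's own work queues ...
 *	}
 */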
6758 void md_check_recovery(mddev_t *mddev)
6764 bitmap_daemon_work(mddev);
6769 if (signal_pending(current)) {
6770 if (mddev->pers->sync_request && !mddev->external) {
6771 printk(KERN_INFO "md: %s in immediate safe mode\n",
6773 mddev->safemode = 2;
6775 flush_signals(current);
6778 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6781 (mddev->flags && !mddev->external) ||
6782 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6783 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6784 (mddev->external == 0 && mddev->safemode == 1) ||
6785 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6786 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6790 if (mddev_trylock(mddev)) {
6794 /* Only thing we do on a ro array is remove
6795 * failed devices.
6796 */
6797 remove_and_add_spares(mddev);
6798 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6802 if (!mddev->external) {
6804 spin_lock_irq(&mddev->write_lock);
6805 if (mddev->safemode &&
6806 !atomic_read(&mddev->writes_pending) &&
6808 mddev->recovery_cp == MaxSector) {
6811 if (mddev->persistent)
6812 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6814 if (mddev->safemode == 1)
6815 mddev->safemode = 0;
6816 spin_unlock_irq(&mddev->write_lock);
6818 sysfs_notify_dirent(mddev->sysfs_state);
6822 md_update_sb(mddev, 0);
6824 list_for_each_entry(rdev, &mddev->disks, same_set)
6825 if (test_and_clear_bit(StateChanged, &rdev->flags))
6826 sysfs_notify_dirent(rdev->sysfs_state);
6829 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6830 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6831 /* resync/recovery still happening */
6832 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6835 if (mddev->sync_thread) {
6836 /* resync has finished, collect result */
6837 md_unregister_thread(mddev->sync_thread);
6838 mddev->sync_thread = NULL;
6839 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6840 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6842 /* activate any spares */
6843 if (mddev->pers->spare_active(mddev))
6844 sysfs_notify(&mddev->kobj, NULL,
6847 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6848 mddev->pers->finish_reshape)
6849 mddev->pers->finish_reshape(mddev);
6850 md_update_sb(mddev, 1);
6852 /* if array is no longer degraded, then any saved_raid_disk
6853 * information must be scrapped
6854 */
6855 if (!mddev->degraded)
6856 list_for_each_entry(rdev, &mddev->disks, same_set)
6857 rdev->saved_raid_disk = -1;
6859 mddev->recovery = 0;
6860 /* flag recovery needed just to double check */
6861 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6862 sysfs_notify_dirent(mddev->sysfs_action);
6863 md_new_event(mddev);
6866 /* Set RUNNING before clearing NEEDED to avoid
6867 * any transients in the value of "sync_action".
6869 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6870 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6871 /* Clear some bits that don't mean anything, but
6872 * might be left set
6873 */
6874 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6875 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6877 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6879 /* no recovery is running.
6880 * remove any failed drives, then
6881 * add spares if possible.
6882 * Spares are also removed and re-added, to allow
6883 * the personality to fail the re-add.
6884 */
6886 if (mddev->reshape_position != MaxSector) {
6887 if (mddev->pers->check_reshape == NULL ||
6888 mddev->pers->check_reshape(mddev) != 0)
6889 /* Cannot proceed */
6891 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6892 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6893 } else if ((spares = remove_and_add_spares(mddev))) {
6894 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6895 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6896 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6897 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6898 } else if (mddev->recovery_cp < MaxSector) {
6899 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6900 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6901 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6902 /* nothing to be done ... */
6905 if (mddev->pers->sync_request) {
6906 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6907 /* We are adding a device or devices to an array
6908 * which has the bitmap stored on all devices.
6909 * So make sure all bitmap pages get written
6911 bitmap_write_all(mddev->bitmap);
6913 mddev->sync_thread = md_register_thread(md_do_sync,
6916 if (!mddev->sync_thread) {
6917 printk(KERN_ERR "%s: could not start resync"
6920 /* leave the spares where they are, it shouldn't hurt */
6921 mddev->recovery = 0;
6923 md_wakeup_thread(mddev->sync_thread);
6924 sysfs_notify_dirent(mddev->sysfs_action);
6925 md_new_event(mddev);
6928 if (!mddev->sync_thread) {
6929 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6930 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6932 if (mddev->sysfs_action)
6933 sysfs_notify_dirent(mddev->sysfs_action);
6935 mddev_unlock(mddev);
6939 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6941 sysfs_notify_dirent(rdev->sysfs_state);
6942 wait_event_timeout(rdev->blocked_wait,
6943 !test_bit(Blocked, &rdev->flags),
6944 msecs_to_jiffies(5000));
6945 rdev_dec_pending(rdev, mddev);
6947 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
6949 static int md_notify_reboot(struct notifier_block *this,
6950 unsigned long code, void *x)
6952 struct list_head *tmp;
6955 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6957 printk(KERN_INFO "md: stopping all md devices.\n");
6959 for_each_mddev(mddev, tmp)
6960 if (mddev_trylock(mddev)) {
6961 /* Force a switch to readonly even if the array
6962 * appears to still be in use. Hence
6963 * the '100'.
6964 */
6965 do_md_stop(mddev, 1, 100);
6966 mddev_unlock(mddev);
6969 * certain more exotic SCSI devices are known to be
6970 * volatile wrt too early system reboots. While the
6971 * right place to handle this issue is the given
6972 * driver, we do want to have a safe RAID driver ...
6979 static struct notifier_block md_notifier = {
6980 .notifier_call = md_notify_reboot,
6982 .priority = INT_MAX, /* before any real devices */
6985 static void md_geninit(void)
6987 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6989 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6992 static int __init md_init(void)
6994 if (register_blkdev(MD_MAJOR, "md"))
6996 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6997 unregister_blkdev(MD_MAJOR, "md");
7000 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
7001 md_probe, NULL, NULL);
7002 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
7003 md_probe, NULL, NULL);
7005 register_reboot_notifier(&md_notifier);
7006 raid_table_header = register_sysctl_table(raid_root_table);
7016 * Searches all registered partitions for autorun RAID arrays
7020 static LIST_HEAD(all_detected_devices);
7021 struct detected_devices_node {
7022 struct list_head list;
7026 void md_autodetect_dev(dev_t dev)
7028 struct detected_devices_node *node_detected_dev;
7030 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
7031 if (node_detected_dev) {
7032 node_detected_dev->dev = dev;
7033 list_add_tail(&node_detected_dev->list, &all_detected_devices);
7035 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
7036 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
7041 static void autostart_arrays(int part)
7044 struct detected_devices_node *node_detected_dev;
7046 int i_scanned, i_passed;
7051 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
7053 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
7055 node_detected_dev = list_entry(all_detected_devices.next,
7056 struct detected_devices_node, list);
7057 list_del(&node_detected_dev->list);
7058 dev = node_detected_dev->dev;
7059 kfree(node_detected_dev);
7060 rdev = md_import_device(dev,0, 90);
7064 if (test_bit(Faulty, &rdev->flags)) {
7068 set_bit(AutoDetected, &rdev->flags);
7069 list_add(&rdev->same_set, &pending_raid_disks);
7073 printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
7074 i_scanned, i_passed);
7076 autorun_devices(part);
7079 #endif /* !MODULE */
7081 static __exit void md_exit(void)
7084 struct list_head *tmp;
7086 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
7087 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
7089 unregister_blkdev(MD_MAJOR,"md");
7090 unregister_blkdev(mdp_major, "mdp");
7091 unregister_reboot_notifier(&md_notifier);
7092 unregister_sysctl_table(raid_table_header);
7093 remove_proc_entry("mdstat", NULL);
7094 for_each_mddev(mddev, tmp) {
7095 export_array(mddev);
7096 mddev->hold_active = 0;
7100 subsys_initcall(md_init);
7101 module_exit(md_exit)
7103 static int get_ro(char *buffer, struct kernel_param *kp)
7105 return sprintf(buffer, "%d", start_readonly);
7107 static int set_ro(const char *val, struct kernel_param *kp)
7110 int num = simple_strtoul(val, &e, 10);
7111 if (*val && (*e == '\0' || *e == '\n')) {
7112 start_readonly = num;
7118 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
7119 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
7121 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
7123 EXPORT_SYMBOL(register_md_personality);
7124 EXPORT_SYMBOL(unregister_md_personality);
7125 EXPORT_SYMBOL(md_error);
7126 EXPORT_SYMBOL(md_done_sync);
7127 EXPORT_SYMBOL(md_write_start);
7128 EXPORT_SYMBOL(md_write_end);
7129 EXPORT_SYMBOL(md_register_thread);
7130 EXPORT_SYMBOL(md_unregister_thread);
7131 EXPORT_SYMBOL(md_wakeup_thread);
7132 EXPORT_SYMBOL(md_check_recovery);
7133 MODULE_LICENSE("GPL");
7135 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);