/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/kmemleak.h>
#include <asm/uaccess.h>
#include "internal.h"
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);
static sector_t max_block(struct block_device *bdev)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int size = block_size(bdev);
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}
/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	if (bdev->bd_inode->i_mapping->nrpages == 0)
		return;
	invalidate_bh_lrus();
	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
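
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * fill_super routine might use the helpers above.  The 1024-byte request is
 * a made-up example value.
 */
static int __maybe_unused example_pick_blocksize(struct super_block *sb)
{
	int bsize;

	/* Ask for 1K blocks, but never less than the device's logical block size. */
	bsize = sb_min_blocksize(sb, 1024);
	if (!bsize)
		return -EINVAL;

	/* sb->s_blocksize and sb->s_blocksize_bits are now consistent. */
	return 0;
}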
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	if (iblock >= max_block(I_BDEV(inode))) {
		if (create)
			return -EIO;

		/*
		 * for reads, we're just trying to fill a partial page.
		 * return a hole, they will have to call get_block again
		 * before they can fill it, and they will get -EIO at that
		 * time
		 */
		return 0;
	}
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}
static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	sector_t end_block = max_block(I_BDEV(inode));
	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

	if ((iblock + max_blocks) > end_block) {
		max_blocks = end_block - iblock;
		if ((long)max_blocks <= 0) {
			if (create)
				return -EIO;	/* write fully beyond EOF */
			/*
			 * It is a read which is fully beyond EOF. We return
			 * a !buffer_mapped buffer
			 */
			max_blocks = 0;
		}
	}

	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	bh->b_size = max_blocks << inode->i_blkbits;
	if (max_blocks)
		set_buffer_mapped(bh);
	return 0;
}
static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
				iov, offset, nr_segs, blkdev_get_blocks, NULL);
}
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * counts down in thaw_bdev().  When it becomes 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		bdev->bd_fsfreeze_count++;
		sb = get_super(bdev);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}
	bdev->bd_fsfreeze_count++;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		sync_filesystem(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->freeze_fs) {
			error = sb->s_op->freeze_fs(sb);
			if (error) {
				printk(KERN_ERR
					"VFS:Filesystem freeze failed\n");
				sb->s_frozen = SB_UNFROZEN;
				drop_super(sb);
				up(&bdev->bd_mount_sem);
				bdev->bd_fsfreeze_count--;
				mutex_unlock(&bdev->bd_fsfreeze_mutex);
				return ERR_PTR(error);
			}
		}
	}

	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return -EINVAL;
	}

	bdev->bd_fsfreeze_count--;
	if (bdev->bd_fsfreeze_count > 0) {
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	if (sb) {
		BUG_ON(sb->s_bdev != bdev);
		if (!(sb->s_flags & MS_RDONLY)) {
			if (sb->s_op->unfreeze_fs) {
				error = sb->s_op->unfreeze_fs(sb);
				if (error) {
					printk(KERN_ERR
						"VFS:Filesystem thaw failed\n");
					sb->s_frozen = SB_FREEZE_TRANS;
					bdev->bd_fsfreeze_count++;
					mutex_unlock(&bdev->bd_fsfreeze_mutex);
					return error;
				}
			}
			sb->s_frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_wait_unfrozen);
		}
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);
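
/*
 * Illustrative sketch (not part of the original file): a snapshot-style
 * caller pairing freeze_bdev() with thaw_bdev().  The snapshot work itself
 * is elided; only the freeze/thaw protocol is shown.
 */
static int __maybe_unused example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;
	int err = 0;

	sb = freeze_bdev(bdev);		/* may return NULL if nothing is mounted */
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* ... take the snapshot of bdev here ... */

	thaw_bdev(bdev, sb);		/* safe to call even when sb is NULL */
	return err;
}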
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	page_cache_release(page);

	return ret;
}
/*
 * private llseek:
 * for a block special file file->f_path.dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	size = i_size_read(bd_inode);

	switch (origin) {
		case 2:
			offset += size;
			break;
		case 1:
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= size) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
		}
		retval = offset;
	}
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}
/*
 *	Filp is never NULL; the only case when ->fsync() is called with
 *	NULL first argument is nfsd_sync_dir() and that's not a directory.
 */
static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	return sync_blockdev(I_BDEV(filp->f_mapping->host));
}
/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
	struct bdev_inode *bdi = BDEV_I(inode);

	bdi->bdev.bd_inode_backing_dev_info = NULL;
	kmem_cache_free(bdev_cachep, bdi);
}
static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	sema_init(&bdev->bd_mount_sem, 1);
	INIT_LIST_HEAD(&bdev->bd_inodes);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}
static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}
static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.get_sb		= bd_get_sb,
	.kill_sb	= kill_anon_super,
};
struct super_block *blockdev_superblock __read_mostly;
void __init bdev_cache_init(void)
{
	int err;
	struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	/*
	 * This vfsmount structure is only used to obtain the
	 * blockdev_superblock, so tell kmemleak not to report it.
	 */
	kmemleak_not_leak(bd_mnt);
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}
/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}
static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);
long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
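
/*
 * Illustrative sketch (not part of the original file): bdget() and bdput()
 * pair up much like igrab()/iput().  MKDEV(8, 0) is only an example device
 * number; obtaining the bdev does not open the underlying device.
 */
static void __maybe_unused example_bdget_bdput(void)
{
	struct block_device *bdev = bdget(MKDEV(8, 0));

	if (!bdev)
		return;
	/* ... the bdev inode is pinned here, but the device is not open ... */
	bdput(bdev);
}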
static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		atomic_inc(&bdev->bd_inode->i_count);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional bd_inode->i_count for inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			atomic_inc(&bdev->bd_inode->i_count);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}
/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (inode->i_bdev) {
		if (!sb_is_blkdev_sb(inode->i_sb))
			bdev = inode->i_bdev;
		__bd_forget(inode);
	}
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}
int bd_claim(struct block_device *bdev, void *holder)
{
	int res;
	spin_lock(&bdev_lock);

	/* first decide result */
	if (bdev->bd_holder == holder)
		res = 0;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		res = -EBUSY; 	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		res = 0;  	 /* is a whole device which isn't held */

	else if (bdev->bd_contains->bd_holder == bd_claim)
		res = 0; 	 /* is a partition of a device that is being partitioned */
	else if (bdev->bd_contains->bd_holder != NULL)
		res = -EBUSY;	 /* is a partition of a held device */
	else
		res = 0;	 /* is a partition of an un-held device */

	/* now impose change */
	if (res == 0) {
		/* note that for a whole device bd_holders
		 * will be incremented twice, and bd_holder will
		 * be set to bd_claim before being set to holder
		 */
		bdev->bd_contains->bd_holders++;
		bdev->bd_contains->bd_holder = bd_claim;
		bdev->bd_holders++;
		bdev->bd_holder = holder;
	}
	spin_unlock(&bdev_lock);
	return res;
}

EXPORT_SYMBOL(bd_claim);
void bd_release(struct block_device *bdev)
{
	spin_lock(&bdev_lock);
	if (!--bdev->bd_contains->bd_holders)
		bdev->bd_contains->bd_holder = NULL;
	if (!--bdev->bd_holders)
		bdev->bd_holder = NULL;
	spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);
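
/*
 * Illustrative sketch (not part of the original file): a claim/release pair.
 * Any unique, stable pointer can serve as the holder cookie; using the
 * caller's own private structure is one common convention.
 */
static int __maybe_unused example_claim(struct block_device *bdev, void *holder)
{
	int err = bd_claim(bdev, holder);

	if (err)
		return err;	/* -EBUSY: someone else already holds the device */
	/* ... exclusive user of bdev here ... */
	bd_release(bdev);
	return 0;
}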
#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 *     If a kobject is passed to bd_claim_by_kobject()
 *     and the kobject has a parent directory,
 *     the following symlinks are created:
 *        o from the kobject to the claimed bdev
 *        o from "holders" directory of the bdev to the parent of the kobject
 *     bd_release_from_kobject() removes these symlinks.
 *
 *     Example:
 *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
 *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */
static int add_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return 0;
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return;
	sysfs_remove_link(from, kobject_name(to));
}
/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
 */
struct bd_holder {
	struct list_head list;	/* chain of holders of the bdev */
	int count;		/* references from the holder */
	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
	struct kobject *hdev;	/* e.g. "/block/dm-0" */
	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
	struct kobject *sdev;	/* e.g. "/block/sda" */
};
/*
 * Get references of related kobjects at once.
 * Returns 1 on success. 0 on failure.
 *
 * Should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
			struct bd_holder *bo)
{
	if (!bdev || !bo)
		return 0;
	bo->sdir = kobject_get(bo->sdir);
	if (!bo->sdir)
		return 0;
	bo->hdev = kobject_get(bo->sdir->parent);
	if (!bo->hdev)
		goto fail_put_sdir;
	bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
	if (!bo->sdev)
		goto fail_put_hdev;
	bo->hdir = kobject_get(bdev->bd_part->holder_dir);
	if (!bo->hdir)
		goto fail_put_sdev;
	return 1;

fail_put_sdev:
	kobject_put(bo->sdev);
fail_put_hdev:
	kobject_put(bo->hdev);
fail_put_sdir:
	kobject_put(bo->sdir);
	return 0;
}
/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
	kobject_put(bo->hdir);
	kobject_put(bo->sdev);
	kobject_put(bo->hdev);
	kobject_put(bo->sdir);
}
static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
	struct bd_holder *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->count = 1;
	bo->sdir = kobj;

	return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
	kfree(bo);
}
/**
 * find_bd_holder - find matching struct bd_holder from the block device
 *
 * @bdev:	struct block device to be searched
 * @bo:		target struct bd_holder
 *
 * Returns matching entry with @bo in @bdev->bd_holder_list.
 * If found, increment the reference count and return the pointer.
 * If not found, returns NULL.
 */
static struct bd_holder *find_bd_holder(struct block_device *bdev,
					struct bd_holder *bo)
{
	struct bd_holder *tmp;

	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
		if (tmp->sdir == bo->sdir) {
			tmp->count++;
			return tmp;
		}

	return NULL;
}
/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @bo:		preallocated and initialized by alloc_bd_holder()
 *
 * Add @bo to @bdev->bd_holder_list, create symlinks.
 *
 * Returns 0 if symlinks are created.
 * Returns -ve if something fails.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
	int err;

	if (!bo)
		return -EINVAL;

	if (!bd_holder_grab_dirs(bdev, bo))
		return -EBUSY;

	err = add_symlink(bo->sdir, bo->sdev);
	if (err)
		return err;

	err = add_symlink(bo->hdir, bo->hdev);
	if (err) {
		del_symlink(bo->sdir, bo->sdev);
		return err;
	}

	list_add_tail(&bo->list, &bdev->bd_holder_list);
	return 0;
}
/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @kobj:	holder's kobject
 *
 * If there is matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list, delete symlinks for it.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if matching claim isn't found or there is other bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
		if (bo->sdir == kobj) {
			bo->count--;
			BUG_ON(bo->count < 0);
			if (!bo->count) {
				list_del(&bo->list);
				del_symlink(bo->sdir, bo->sdev);
				del_symlink(bo->hdir, bo->hdev);
				bd_holder_release_dirs(bo);
				return bo;
			}
			break;
		}
	}

	return NULL;
}
/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @kobj:	holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success. (same as bd_claim())
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
				struct kobject *kobj)
{
	int err;
	struct bd_holder *bo, *found;

	if (!kobj)
		return -EINVAL;

	bo = alloc_bd_holder(kobj);
	if (!bo)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);

	err = bd_claim(bdev, holder);
	if (err)
		goto fail;

	found = find_bd_holder(bdev, bo);
	if (found)
		goto fail;

	err = add_bd_holder(bdev, bo);
	if (err)
		bd_release(bdev);
	else
		bo = NULL;
fail:
	mutex_unlock(&bdev->bd_mutex);
	free_bd_holder(bo);
	return err;
}
/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev:	block device to be released
 * @kobj:	holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
					struct kobject *kobj)
{
	if (!kobj)
		return;

	mutex_lock(&bdev->bd_mutex);
	bd_release(bdev);
	free_bd_holder(del_bd_holder(bdev, kobj));
	mutex_unlock(&bdev->bd_mutex);
}
/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @disk:	holder's gendisk
 *
 * Call bd_claim_by_kobject() with a reference taken on @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
			struct gendisk *disk)
{
	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);
/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev:	block device to be released
 * @disk:	holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
	bd_release_from_kobject(bdev, disk->slave_dir);
	kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif
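
/*
 * Illustrative sketch (not part of the original file): how a stacking driver
 * (device-mapper style) might claim an underlying disk so that the
 * /sys/block/.../slaves and .../holders symlinks appear.  "tgt" is a made-up
 * placeholder for the driver's private per-target structure.
 */
static int __maybe_unused example_claim_by_disk(struct block_device *lower,
						struct gendisk *upper, void *tgt)
{
	int err = bd_claim_by_disk(lower, tgt, upper);

	if (err)
		return err;
	/* ... use 'lower' as a component of 'upper' ... */
	bd_release_from_disk(lower, upper);
	return 0;
}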
/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
{
	struct block_device *bdev = bdget(dev);
	int err = -ENOMEM;
	if (bdev)
		err = blkdev_get(bdev, mode);
	return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);
/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:	struct block device to be flushed
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev)
{
	if (__invalidate_device(bdev)) {
		char name[BDEVNAME_SIZE] = "";

		if (bdev->bd_disk)
			disk_name(bdev->bd_disk, 0, name);
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n", name);
	}

	if (!bdev->bd_disk)
		return;
	if (disk_partitionable(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}
/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		char name[BDEVNAME_SIZE];

		disk_name(disk, 0, name);
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev);
	}
}
EXPORT_SYMBOL(check_disk_size_change);
/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);

	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);
/*
 * This routine checks whether a removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device_operations * bdops = disk->fops;

	if (!bdops->media_changed)
		return 0;
	if (!bdops->media_changed(bdev->bd_disk))
		return 0;

	flush_disk(bdev);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);
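
/*
 * Illustrative sketch (not part of the original file): a removable-media
 * driver typically calls check_disk_change() from its ->open() method so a
 * media swap invalidates stale page-cache and buffer-cache entries before
 * any new I/O is issued.
 */
static int __maybe_unused example_removable_open(struct block_device *bdev,
						 fmode_t mode)
{
	check_disk_change(bdev);
	/* ... driver-specific open work would follow ... */
	return 0;
}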
void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_logical_block_size(bdev);

	bdev->bd_inode->i_size = size;
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */
static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk;
	int ret;
	int partno;
	int perm = 0;

	if (mode & FMODE_READ)
		perm |= MAY_READ;
	if (mode & FMODE_WRITE)
		perm |= MAY_WRITE;
	/*
	 * hooks: /n/, see "layering violations".
	 */
	ret = devcgroup_inode_permission(bdev->bd_inode, perm);
	if (ret != 0) {
		bdput(bdev);
		return ret;
	}

	lock_kernel();
 restart:

	ret = -ENXIO;
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		goto out_unlock_kernel;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		if (!partno) {
			struct backing_dev_info *bdi;

			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!bdev->bd_part)
				goto out_clear;

			if (disk->fops->open) {
				ret = disk->fops->open(bdev, mode);
				if (ret == -ERESTARTSYS) {
					/* Lost a race with 'disk' being
					 * deleted, try again.
					 * See md.c
					 */
					disk_put_part(bdev->bd_part);
					bdev->bd_part = NULL;
					module_put(disk->fops->owner);
					put_disk(disk);
					bdev->bd_disk = NULL;
					mutex_unlock(&bdev->bd_mutex);
					goto restart;
				}
				if (ret)
					goto out_clear;
			}
			if (!bdev->bd_openers) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
				bdi = blk_get_backing_dev_info(bdev);
				if (bdi == NULL)
					bdi = &default_backing_dev_info;
				bdev->bd_inode->i_data.backing_dev_info = bdi;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(disk, bdev);
		} else {
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_clear;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, mode, 1);
			if (ret)
				goto out_clear;
			bdev->bd_contains = whole;
			bdev->bd_inode->i_data.backing_dev_info =
			   whole->bd_inode->i_data.backing_dev_info;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
				goto out_clear;
			}
			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
		}
	} else {
		put_disk(disk);
		module_put(disk->fops->owner);
		disk = NULL;
		if (bdev->bd_contains == bdev) {
			if (bdev->bd_disk->fops->open) {
				ret = bdev->bd_disk->fops->open(bdev, mode);
				if (ret)
					goto out_unlock_bdev;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(bdev->bd_disk, bdev);
		}
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	return 0;

 out_clear:
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
 out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
 out_unlock_kernel:
	unlock_kernel();

	if (disk)
		module_put(disk->fops->owner);
	put_disk(disk);
	bdput(bdev);

	return ret;
}
int blkdev_get(struct block_device *bdev, fmode_t mode)
{
	return __blkdev_get(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_get);
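
/*
 * Illustrative sketch (not part of the original file): blkdev_get() consumes
 * the reference obtained from bdget() even on failure, so the caller only
 * pairs a successful blkdev_get() with blkdev_put().  MKDEV(8, 16) is just an
 * example device number.
 */
static int __maybe_unused example_open_by_number(void)
{
	struct block_device *bdev = bdget(MKDEV(8, 16));
	int err;

	if (!bdev)
		return -ENOMEM;
	err = blkdev_get(bdev, FMODE_READ);	/* drops the bdget() ref on error */
	if (err)
		return err;
	/* ... device is open for reading here ... */
	return blkdev_put(bdev, FMODE_READ);
}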
static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;
	int res;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;

	res = blkdev_get(bdev, filp->f_mode);
	if (res)
		return res;

	if (filp->f_mode & FMODE_EXCL) {
		res = bd_claim(bdev, filp);
		if (res)
			goto out_blkdev_put;
	}

	return 0;

 out_blkdev_put:
	blkdev_put(bdev, filp->f_mode);
	return res;
}
static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	int ret = 0;
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	lock_kernel();
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		sync_blockdev(bdev);
		kill_bdev(bdev);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			ret = disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		put_disk(disk);
		module_put(owner);
		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;
	}
	unlock_kernel();
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
	return ret;
}
int blkdev_put(struct block_device *bdev, fmode_t mode)
{
	return __blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	if (bdev->bd_holder == filp)
		bd_release(bdev);
	return blkdev_put(bdev, filp->f_mode);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}
/*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.writepage	= blkdev_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= generic_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
};
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write_nolock,
	.mmap		= generic_file_mmap,
	.fsync		= block_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev, 0, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);
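
/*
 * Illustrative sketch (not part of the original file): issuing a block ioctl
 * from kernel context.  BLKGETSIZE64 with a local u64 is just an example of
 * passing a kernel pointer, which works because ioctl_by_bdev() switches to
 * KERNEL_DS around the call.
 */
static u64 __maybe_unused example_bdev_size_bytes(struct block_device *bdev)
{
	u64 bytes = 0;

	if (ioctl_by_bdev(bdev, BLKGETSIZE64, (unsigned long)&bytes))
		return 0;
	return bytes;
}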
/**
 * lookup_bdev  - lookup a struct block_device by name
 * @pathname:	special file representing the block device
 *
 * Get a reference to the blockdevice at @pathname in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = path.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (path.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);
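
/*
 * Illustrative sketch (not part of the original file): resolving a path such
 * as "/dev/sda1" to its struct block_device.  The path literal is only an
 * example; the reference obtained must be dropped with bdput().
 */
static void __maybe_unused example_lookup(void)
{
	struct block_device *bdev = lookup_bdev("/dev/sda1");

	if (IS_ERR(bdev))
		return;
	printk(KERN_DEBUG "found bdev %u:%u\n",
	       MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	bdput(bdev);
}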
/**
 * open_bdev_exclusive  -  open a block device by name and set it up for use
 *
 * @path:	special file representing the block device
 * @mode:	FMODE_... combination to be used
 * @holder:	owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int error = 0;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	error = blkdev_get(bdev, mode);
	if (error)
		return ERR_PTR(error);
	error = -EACCES;
	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
		goto blkdev_put;
	error = bd_claim(bdev, holder);
	if (error)
		goto blkdev_put;

	return bdev;

blkdev_put:
	blkdev_put(bdev, mode);
	return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_exclusive);
/**
 * close_bdev_exclusive  -  close a blockdevice opened by open_bdev_exclusive()
 *
 * @bdev:	blockdevice to close
 * @mode:	mode, must match that used to open.
 *
 * This is the counterpart to open_bdev_exclusive().
 */
void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
{
	bd_release(bdev);
	blkdev_put(bdev, mode);
}

EXPORT_SYMBOL(close_bdev_exclusive);
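
/*
 * Illustrative sketch (not part of the original file): the usual mount-time
 * pattern for open_bdev_exclusive()/close_bdev_exclusive().  Using the
 * filesystem type as the holder cookie is just one possible convention.
 */
static int __maybe_unused example_open_exclusive(const char *dev_name, void *fs_type)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(dev_name, FMODE_READ | FMODE_WRITE, fs_type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... read the superblock from bdev here ... */
	close_bdev_exclusive(bdev, FMODE_READ | FMODE_WRITE);
	return 0;
}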
int __invalidate_device(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * hold).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);