4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/cleancache.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
56 EXPORT_SYMBOL(init_buffer);
58 static int sleep_on_buffer(void *word)
64 void __lock_buffer(struct buffer_head *bh)
66 wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
67 TASK_UNINTERRUPTIBLE);
69 EXPORT_SYMBOL(__lock_buffer);
71 void unlock_buffer(struct buffer_head *bh)
73 clear_bit_unlock(BH_Lock, &bh->b_state);
74 smp_mb__after_clear_bit();
75 wake_up_bit(&bh->b_state, BH_Lock);
77 EXPORT_SYMBOL(unlock_buffer);
80 * Block until a buffer comes unlocked. This doesn't stop it
81 * from becoming locked again - you have to lock it yourself
82 * if you want to preserve its state.
84 void __wait_on_buffer(struct buffer_head * bh)
86 wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
88 EXPORT_SYMBOL(__wait_on_buffer);
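
/*
 * Illustrative sketch (not part of this file): a synchronous read of a
 * single buffer using the lock/wait primitives above.  This mirrors what
 * __bread_slow() further down does; the function name is hypothetical.
 */
static int example_read_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);			/* exclude concurrent I/O and updates */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* completion handler drops one ref */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks the bh on completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* block until the bh is unlocked */
	return buffer_uptodate(bh) ? 0 : -EIO;
}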
91 __clear_page_buffers(struct page *page)
93 ClearPagePrivate(page);
94 set_page_private(page, 0);
95 page_cache_release(page);
99 static int quiet_error(struct buffer_head *bh)
101 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
107 static void buffer_io_error(struct buffer_head *bh)
109 char b[BDEVNAME_SIZE];
110 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
111 bdevname(bh->b_bdev, b),
112 (unsigned long long)bh->b_blocknr);
116 * End-of-IO handler helper function which does not touch the bh after
118 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
119 * a race there is benign: unlock_buffer() only uses the bh's address for
120 * hashing after unlocking the buffer, so it doesn't actually touch the bh
123 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
126 set_buffer_uptodate(bh);
128 /* This happens, due to failed READA attempts. */
129 clear_buffer_uptodate(bh);
135 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
136 * unlock the buffer. This is what ll_rw_block uses too.
138 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
140 __end_buffer_read_notouch(bh, uptodate);
143 EXPORT_SYMBOL(end_buffer_read_sync);
145 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
147 char b[BDEVNAME_SIZE];
150 set_buffer_uptodate(bh);
152 if (!quiet_error(bh)) {
154 printk(KERN_WARNING "lost page write due to "
156 bdevname(bh->b_bdev, b));
158 set_buffer_write_io_error(bh);
159 clear_buffer_uptodate(bh);
164 EXPORT_SYMBOL(end_buffer_write_sync);
167 * Various filesystems appear to want __find_get_block to be non-blocking.
168 * But it's the page lock which protects the buffers. To get around this,
169 * we get exclusion from try_to_free_buffers with the blockdev mapping's
172 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
173 * may be quite high. This code could TryLock the page, and if that
174 * succeeds, there is no need to take private_lock. (But if
175 * private_lock is contended then so is mapping->tree_lock).
177 static struct buffer_head *
178 __find_get_block_slow(struct block_device *bdev, sector_t block)
180 struct inode *bd_inode = bdev->bd_inode;
181 struct address_space *bd_mapping = bd_inode->i_mapping;
182 struct buffer_head *ret = NULL;
184 struct buffer_head *bh;
185 struct buffer_head *head;
189 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
190 page = find_get_page(bd_mapping, index);
194 spin_lock(&bd_mapping->private_lock);
195 if (!page_has_buffers(page))
197 head = page_buffers(page);
200 if (!buffer_mapped(bh))
202 else if (bh->b_blocknr == block) {
207 bh = bh->b_this_page;
208 } while (bh != head);
210 /* we might be here because some of the buffers on this page are
211 * not mapped. This is due to various races between
212 * file io on the block device and getblk. It gets dealt with
213 * elsewhere, don't buffer_error if we had some unmapped buffers
216 printk("__find_get_block_slow() failed. "
217 "block=%llu, b_blocknr=%llu\n",
218 (unsigned long long)block,
219 (unsigned long long)bh->b_blocknr);
220 printk("b_state=0x%08lx, b_size=%zu\n",
221 bh->b_state, bh->b_size);
222 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225 spin_unlock(&bd_mapping->private_lock);
226 page_cache_release(page);
231 /* If invalidate_buffers() will trash dirty buffers, it means some kind
232 of fs corruption is going on. Trashing dirty data always implies losing
233 information that was supposed to be just stored on the physical layer
236 Thus invalidate_buffers in general usage is not allowed to trash
237 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
238 be preserved. These buffers are simply skipped.
240 We also skip buffers which are still in use. For example this can
241 happen if a userspace program is reading the block device.
243 NOTE: in the case where the user removed a removable-media disk while
244 there was still dirty data not synced to disk (due to a bug in the device
245 driver or to an error by the user), not destroying the dirty buffers could
246 also corrupt the next media inserted; a parameter is therefore necessary
247 to handle this case as safely as possible (trying not to corrupt the
248 newly inserted disk with data belonging to the old, now corrupted one).
249 Also, for a ramdisk the natural way to release the ramdisk memory is
250 to destroy its dirty buffers.
252 These are two special cases. Normal usage implies that the device driver
253 issues a sync on the device (without waiting for I/O completion) and
254 then an invalidate_buffers call that doesn't trash dirty buffers.
256 For handling cache coherency with the blkdev pagecache the 'update' case
257 has been introduced. It is needed to re-read from disk any pinned
258 buffer. NOTE: re-reading from disk is destructive so we can do it only
259 when we assume nobody is changing the buffercache under our I/O and when
260 we think the disk contains more recent information than the buffercache.
261 The update == 1 pass marks the buffers we need to update, the update == 2
262 pass does the actual I/O. */
263 void invalidate_bdev(struct block_device *bdev)
265 struct address_space *mapping = bdev->bd_inode->i_mapping;
267 if (mapping->nrpages == 0)
270 invalidate_bh_lrus();
271 lru_add_drain_all(); /* make sure all lru add caches are flushed */
272 invalidate_mapping_pages(mapping, 0, -1);
273 /* 99% of the time, we don't need to flush the cleancache on the bdev.
274 * But, for the strange corners, let's be cautious
276 cleancache_flush_inode(mapping);
278 EXPORT_SYMBOL(invalidate_bdev);
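
/*
 * Illustrative sketch (not part of this file): the usual pattern the
 * comment above describes for a block driver handling media change -
 * sync the device first (without trashing dirty buffers), then drop the
 * now-stale clean pagecache.  The function name is hypothetical.
 */
static void example_media_changed(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* push dirty pagecache data to the device */
	invalidate_bdev(bdev);	/* discard clean, unpinned pagecache pages */
}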
281 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
283 static void free_more_memory(void)
288 wakeup_flusher_threads(1024);
291 for_each_online_node(nid) {
292 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
293 gfp_zone(GFP_NOFS), NULL,
296 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
302 * I/O completion handler for block_read_full_page() - pages
303 * which come unlocked at the end of I/O.
305 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
308 struct buffer_head *first;
309 struct buffer_head *tmp;
311 int page_uptodate = 1;
313 BUG_ON(!buffer_async_read(bh));
317 set_buffer_uptodate(bh);
319 clear_buffer_uptodate(bh);
320 if (!quiet_error(bh))
326 * Be _very_ careful from here on. Bad things can happen if
327 * two buffer heads end IO at almost the same time and both
328 * decide that the page is now completely done.
330 first = page_buffers(page);
331 local_irq_save(flags);
332 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
333 clear_buffer_async_read(bh);
337 if (!buffer_uptodate(tmp))
339 if (buffer_async_read(tmp)) {
340 BUG_ON(!buffer_locked(tmp));
343 tmp = tmp->b_this_page;
345 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
346 local_irq_restore(flags);
349 * If none of the buffers had errors and they are all
350 * uptodate then we can set the page uptodate.
352 if (page_uptodate && !PageError(page))
353 SetPageUptodate(page);
358 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
359 local_irq_restore(flags);
364 * Completion handler for block_write_full_page() - pages which are unlocked
365 * during I/O, and which have PageWriteback cleared upon I/O completion.
367 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
369 char b[BDEVNAME_SIZE];
371 struct buffer_head *first;
372 struct buffer_head *tmp;
375 BUG_ON(!buffer_async_write(bh));
379 set_buffer_uptodate(bh);
381 if (!quiet_error(bh)) {
383 printk(KERN_WARNING "lost page write due to "
385 bdevname(bh->b_bdev, b));
387 set_bit(AS_EIO, &page->mapping->flags);
388 set_buffer_write_io_error(bh);
389 clear_buffer_uptodate(bh);
393 first = page_buffers(page);
394 local_irq_save(flags);
395 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
397 clear_buffer_async_write(bh);
399 tmp = bh->b_this_page;
401 if (buffer_async_write(tmp)) {
402 BUG_ON(!buffer_locked(tmp));
405 tmp = tmp->b_this_page;
407 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
408 local_irq_restore(flags);
409 end_page_writeback(page);
413 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
414 local_irq_restore(flags);
417 EXPORT_SYMBOL(end_buffer_async_write);
420 * If a page's buffers are under async read-in (end_buffer_async_read
421 * completion) then there is a possibility that another thread of
422 * control could lock one of the buffers after it has completed
423 * but while some of the other buffers have not completed. This
424 * locked buffer would confuse end_buffer_async_read() into not unlocking
425 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
426 * that this buffer is not under async I/O.
428 * The page comes unlocked when it has no locked buffer_async buffers
431 * PageLocked prevents anyone starting new async I/O reads any of
434 * PageWriteback is used to prevent simultaneous writeout of the same
437 * PageLocked prevents anyone from starting writeback of a page which is
438 * under read I/O (PageWriteback is only ever set against a locked page).
440 static void mark_buffer_async_read(struct buffer_head *bh)
442 bh->b_end_io = end_buffer_async_read;
443 set_buffer_async_read(bh);
446 static void mark_buffer_async_write_endio(struct buffer_head *bh,
447 bh_end_io_t *handler)
449 bh->b_end_io = handler;
450 set_buffer_async_write(bh);
453 void mark_buffer_async_write(struct buffer_head *bh)
455 mark_buffer_async_write_endio(bh, end_buffer_async_write);
457 EXPORT_SYMBOL(mark_buffer_async_write);
461 * fs/buffer.c contains helper functions for buffer-backed address space's
462 * fsync functions. A common requirement for buffer-based filesystems is
463 * that certain data from the backing blockdev needs to be written out for
464 * a successful fsync(). For example, ext2 indirect blocks need to be
465 * written back and waited upon before fsync() returns.
467 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
468 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
469 * management of a list of dependent buffers at ->i_mapping->private_list.
471 * Locking is a little subtle: try_to_free_buffers() will remove buffers
472 * from their controlling inode's queue when they are being freed. But
473 * try_to_free_buffers() will be operating against the *blockdev* mapping
474 * at the time, not against the S_ISREG file which depends on those buffers.
475 * So the locking for private_list is via the private_lock in the address_space
476 * which backs the buffers. Which is different from the address_space
477 * against which the buffers are listed. So for a particular address_space,
478 * mapping->private_lock does *not* protect mapping->private_list! In fact,
479 * mapping->private_list will always be protected by the backing blockdev's
482 * Which introduces a requirement: all buffers on an address_space's
483 * ->private_list must be from the same address_space: the blockdev's.
485 * address_spaces which do not place buffers at ->private_list via these
486 * utility functions are free to use private_lock and private_list for
487 * whatever they want. The only requirement is that list_empty(private_list)
488 * be true at clear_inode() time.
490 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
491 * filesystems should do that. invalidate_inode_buffers() should just go
492 * BUG_ON(!list_empty).
494 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
495 * take an address_space, not an inode. And it should be called
496 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
499 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
500 * list if it is already on a list. Because if the buffer is on a list,
501 * it *must* already be on the right one. If not, the filesystem is being
502 * silly. This will save a ton of locking. But first we have to ensure
503 * that buffers are taken *off* the old inode's list when they are freed
504 * (presumably in truncate). That requires careful auditing of all
505 * filesystems (do it inside bforget()). It could also be done by bringing
510 * The buffer's backing address_space's private_lock must be held
512 static void __remove_assoc_queue(struct buffer_head *bh)
514 list_del_init(&bh->b_assoc_buffers);
515 WARN_ON(!bh->b_assoc_map);
516 if (buffer_write_io_error(bh))
517 set_bit(AS_EIO, &bh->b_assoc_map->flags);
518 bh->b_assoc_map = NULL;
521 int inode_has_buffers(struct inode *inode)
523 return !list_empty(&inode->i_data.private_list);
527 * osync is designed to support O_SYNC io. It waits synchronously for
528 * all already-submitted IO to complete, but does not queue any new
529 * writes to the disk.
531 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
532 * you dirty the buffers, and then use osync_inode_buffers to wait for
533 * completion. Any other dirty buffers which are not yet queued for
534 * write will not be flushed to disk by the osync.
536 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
538 struct buffer_head *bh;
544 list_for_each_prev(p, list) {
546 if (buffer_locked(bh)) {
550 if (!buffer_uptodate(bh))
561 static void do_thaw_one(struct super_block *sb, void *unused)
563 char b[BDEVNAME_SIZE];
564 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
565 printk(KERN_WARNING "Emergency Thaw on %s\n",
566 bdevname(sb->s_bdev, b));
569 static void do_thaw_all(struct work_struct *work)
571 iterate_supers(do_thaw_one, NULL);
573 printk(KERN_WARNING "Emergency Thaw complete\n");
577 * emergency_thaw_all -- forcibly thaw every frozen filesystem
579 * Used for emergency unfreeze of all filesystems via SysRq
581 void emergency_thaw_all(void)
583 struct work_struct *work;
585 work = kmalloc(sizeof(*work), GFP_ATOMIC);
587 INIT_WORK(work, do_thaw_all);
593 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
594 * @mapping: the mapping which wants those buffers written
596 * Starts I/O against the buffers at mapping->private_list, and waits upon
599 * Basically, this is a convenience function for fsync().
600 * @mapping is a file or directory which needs those buffers to be written for
601 * a successful fsync().
603 int sync_mapping_buffers(struct address_space *mapping)
605 struct address_space *buffer_mapping = mapping->assoc_mapping;
607 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
610 return fsync_buffers_list(&buffer_mapping->private_lock,
611 &mapping->private_list);
613 EXPORT_SYMBOL(sync_mapping_buffers);
616 * Called when we've recently written block `bblock', and it is known that
617 * `bblock' was for a buffer_boundary() buffer. This means that the block at
618 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
619 * dirty, schedule it for IO. So that indirects merge nicely with their data.
621 void write_boundary_block(struct block_device *bdev,
622 sector_t bblock, unsigned blocksize)
624 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
626 if (buffer_dirty(bh))
627 ll_rw_block(WRITE, 1, &bh);
632 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
634 struct address_space *mapping = inode->i_mapping;
635 struct address_space *buffer_mapping = bh->b_page->mapping;
637 mark_buffer_dirty(bh);
638 if (!mapping->assoc_mapping) {
639 mapping->assoc_mapping = buffer_mapping;
641 BUG_ON(mapping->assoc_mapping != buffer_mapping);
643 if (!bh->b_assoc_map) {
644 spin_lock(&buffer_mapping->private_lock);
645 list_move_tail(&bh->b_assoc_buffers,
646 &mapping->private_list);
647 bh->b_assoc_map = mapping;
648 spin_unlock(&buffer_mapping->private_lock);
651 EXPORT_SYMBOL(mark_buffer_dirty_inode);
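
/*
 * Illustrative sketch (not part of this file): how a buffer-backed
 * filesystem typically uses the association list described above.
 * Metadata buffers (e.g. indirect blocks) are queued on the inode's
 * ->private_list with mark_buffer_dirty_inode(), and the fsync path then
 * writes and waits on them with sync_mapping_buffers().  The function
 * name and signature details are hypothetical and simplified.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct address_space *mapping = file->f_mapping;
	int err;

	/* flush the file's own dirty pages first */
	err = filemap_write_and_wait_range(mapping, start, end);
	if (err)
		return err;
	/* then write & wait on buffers queued via mark_buffer_dirty_inode() */
	return sync_mapping_buffers(mapping);
}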
654 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
657 * If warn is true, then emit a warning if the page is not uptodate and has
658 * not been truncated.
660 static void __set_page_dirty(struct page *page,
661 struct address_space *mapping, int warn)
663 spin_lock_irq(&mapping->tree_lock);
664 if (page->mapping) { /* Race with truncate? */
665 WARN_ON_ONCE(warn && !PageUptodate(page));
666 account_page_dirtied(page, mapping);
667 radix_tree_tag_set(&mapping->page_tree,
668 page_index(page), PAGECACHE_TAG_DIRTY);
670 spin_unlock_irq(&mapping->tree_lock);
671 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 * Add a page to the dirty page list.
677 * It is a sad fact of life that this function is called from several places
678 * deeply under spinlocking. It may not sleep.
680 * If the page has buffers, the uptodate buffers are set dirty, to preserve
681 * dirty-state coherency between the page and the buffers. If the page does
682 * not have buffers then when they are later attached they will all be set
685 * The buffers are dirtied before the page is dirtied. There's a small race
686 * window in which a writepage caller may see the page cleanness but not the
687 * buffer dirtiness. That's fine. If this code were to set the page dirty
688 * before the buffers, a concurrent writepage caller could clear the page dirty
689 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
690 * page on the dirty page list.
692 * We use private_lock to lock against try_to_free_buffers while using the
693 * page's buffer list. Also use this to protect against clean buffers being
694 * added to the page after it was set dirty.
696 * FIXME: may need to call ->reservepage here as well. That's rather up to the
697 * address_space though.
699 int __set_page_dirty_buffers(struct page *page)
702 struct address_space *mapping = page_mapping(page);
704 if (unlikely(!mapping))
705 return !TestSetPageDirty(page);
707 spin_lock(&mapping->private_lock);
708 if (page_has_buffers(page)) {
709 struct buffer_head *head = page_buffers(page);
710 struct buffer_head *bh = head;
713 set_buffer_dirty(bh);
714 bh = bh->b_this_page;
715 } while (bh != head);
717 newly_dirty = !TestSetPageDirty(page);
718 spin_unlock(&mapping->private_lock);
721 __set_page_dirty(page, mapping, 1);
724 EXPORT_SYMBOL(__set_page_dirty_buffers);
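
/*
 * Illustrative sketch (not part of this file): a buffer-backed filesystem
 * can point its ->set_page_dirty at the helper above (it is also the
 * fallback set_page_dirty() uses when an address_space supplies no
 * method).  The other entries are hypothetical placeholders.
 */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	/* .readpage, .writepage, .write_begin, .write_end, ... */
};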
727 * Write out and wait upon a list of buffers.
729 * We have conflicting pressures: we want to make sure that all
730 * initially dirty buffers get waited on, but that any subsequently
731 * dirtied buffers don't. After all, we don't want fsync to last
732 * forever if somebody is actively writing to the file.
734 * Do this in two main stages: first we copy dirty buffers to a
735 * temporary inode list, queueing the writes as we go. Then we clean
736 * up, waiting for those writes to complete.
738 * During this second stage, any subsequent updates to the file may end
739 * up refiling the buffer on the original inode's dirty list again, so
740 * there is a chance we will end up with a buffer queued for write but
741 * not yet completed on that list. So, as a final cleanup we go through
742 * the osync code to catch these locked, dirty buffers without requeuing
743 * any newly dirty buffers for write.
745 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
747 struct buffer_head *bh;
748 struct list_head tmp;
749 struct address_space *mapping;
751 struct blk_plug plug;
753 INIT_LIST_HEAD(&tmp);
754 blk_start_plug(&plug);
757 while (!list_empty(list)) {
758 bh = BH_ENTRY(list->next);
759 mapping = bh->b_assoc_map;
760 __remove_assoc_queue(bh);
761 /* Avoid race with mark_buffer_dirty_inode() which does
762 * a lockless check and we rely on seeing the dirty bit */
764 if (buffer_dirty(bh) || buffer_locked(bh)) {
765 list_add(&bh->b_assoc_buffers, &tmp);
766 bh->b_assoc_map = mapping;
767 if (buffer_dirty(bh)) {
771 * Ensure any pending I/O completes so that
772 * write_dirty_buffer() actually writes the
773 * current contents - it is a noop if I/O is
774 * still in flight on potentially older
777 write_dirty_buffer(bh, WRITE_SYNC);
780 * Kick off IO for the previous mapping. Note
781 * that we will not run the very last mapping,
782 * wait_on_buffer() will do that for us
783 * through sync_buffer().
792 blk_finish_plug(&plug);
795 while (!list_empty(&tmp)) {
796 bh = BH_ENTRY(tmp.prev);
798 mapping = bh->b_assoc_map;
799 __remove_assoc_queue(bh);
800 /* Avoid race with mark_buffer_dirty_inode() which does
801 * a lockless check and we rely on seeing the dirty bit */
803 if (buffer_dirty(bh)) {
804 list_add(&bh->b_assoc_buffers,
805 &mapping->private_list);
806 bh->b_assoc_map = mapping;
810 if (!buffer_uptodate(bh))
817 err2 = osync_buffers_list(lock, list);
825 * Invalidate any and all dirty buffers on a given inode. We are
826 * probably unmounting the fs, but that doesn't mean we have already
827 * done a sync(). Just drop the buffers from the inode list.
829 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
830 * assumes that all the buffers are against the blockdev. Not true
833 void invalidate_inode_buffers(struct inode *inode)
835 if (inode_has_buffers(inode)) {
836 struct address_space *mapping = &inode->i_data;
837 struct list_head *list = &mapping->private_list;
838 struct address_space *buffer_mapping = mapping->assoc_mapping;
840 spin_lock(&buffer_mapping->private_lock);
841 while (!list_empty(list))
842 __remove_assoc_queue(BH_ENTRY(list->next));
843 spin_unlock(&buffer_mapping->private_lock);
846 EXPORT_SYMBOL(invalidate_inode_buffers);
849 * Remove any clean buffers from the inode's buffer list. This is called
850 * when we're trying to free the inode itself. Those buffers can pin it.
852 * Returns true if all buffers were removed.
854 int remove_inode_buffers(struct inode *inode)
858 if (inode_has_buffers(inode)) {
859 struct address_space *mapping = &inode->i_data;
860 struct list_head *list = &mapping->private_list;
861 struct address_space *buffer_mapping = mapping->assoc_mapping;
863 spin_lock(&buffer_mapping->private_lock);
864 while (!list_empty(list)) {
865 struct buffer_head *bh = BH_ENTRY(list->next);
866 if (buffer_dirty(bh)) {
870 __remove_assoc_queue(bh);
872 spin_unlock(&buffer_mapping->private_lock);
878 * Create the appropriate buffers when given a page for data area and
879 * the size of each buffer.. Use the bh->b_this_page linked list to
880 * follow the buffers created. Return NULL if unable to create more
883 * The retry flag is used to differentiate async IO (paging, swapping)
884 * which may not fail from ordinary buffer allocations.
886 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
889 struct buffer_head *bh, *head;
895 while ((offset -= size) >= 0) {
896 bh = alloc_buffer_head(GFP_NOFS);
901 bh->b_this_page = head;
906 atomic_set(&bh->b_count, 0);
909 /* Link the buffer to its page */
910 set_bh_page(bh, page, offset);
912 init_buffer(bh, NULL, NULL);
916 * In case anything failed, we just free everything we got.
922 head = head->b_this_page;
923 free_buffer_head(bh);
928 * Return failure for non-async IO requests. Async IO requests
929 * are not allowed to fail, so we have to wait until buffer heads
930 * become available. But we don't want tasks sleeping with
931 * partially complete buffers, so all were released above.
936 /* We're _really_ low on memory. Now we just
937 * wait for old buffer heads to become free due to
938 * finishing IO. Since this is an async request and
939 * the reserve list is empty, we're sure there are
940 * async buffer heads in use.
945 EXPORT_SYMBOL_GPL(alloc_page_buffers);
948 link_dev_buffers(struct page *page, struct buffer_head *head)
950 struct buffer_head *bh, *tail;
955 bh = bh->b_this_page;
957 tail->b_this_page = head;
958 attach_page_buffers(page, head);
962 * Initialise the state of a blockdev page's buffers.
965 init_page_buffers(struct page *page, struct block_device *bdev,
966 sector_t block, int size)
968 struct buffer_head *head = page_buffers(page);
969 struct buffer_head *bh = head;
970 int uptodate = PageUptodate(page);
971 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
974 if (!buffer_mapped(bh)) {
975 init_buffer(bh, NULL, NULL);
977 bh->b_blocknr = block;
979 set_buffer_uptodate(bh);
980 if (block < end_block)
981 set_buffer_mapped(bh);
984 bh = bh->b_this_page;
985 } while (bh != head);
989 * Create the page-cache page that contains the requested block.
991 * This is used purely for blockdev mappings.
994 grow_dev_page(struct block_device *bdev, sector_t block,
995 pgoff_t index, int size)
997 struct inode *inode = bdev->bd_inode;
999 struct buffer_head *bh;
1001 page = find_or_create_page(inode->i_mapping, index,
1002 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1006 BUG_ON(!PageLocked(page));
1008 if (page_has_buffers(page)) {
1009 bh = page_buffers(page);
1010 if (bh->b_size == size) {
1011 init_page_buffers(page, bdev, block, size);
1014 if (!try_to_free_buffers(page))
1019 * Allocate some buffers for this page
1021 bh = alloc_page_buffers(page, size, 0);
1026 * Link the page to the buffers and initialise them. Take the
1027 * lock to be atomic wrt __find_get_block(), which does not
1028 * run under the page lock.
1030 spin_lock(&inode->i_mapping->private_lock);
1031 link_dev_buffers(page, bh);
1032 init_page_buffers(page, bdev, block, size);
1033 spin_unlock(&inode->i_mapping->private_lock);
1039 page_cache_release(page);
1044 * Create buffers for the specified block device block's page. If
1045 * that page was dirty, the buffers are set dirty also.
1048 grow_buffers(struct block_device *bdev, sector_t block, int size)
1057 } while ((size << sizebits) < PAGE_SIZE);
1059 index = block >> sizebits;
1062 * Check for a block which wants to lie outside our maximum possible
1063 * pagecache index. (this comparison is done using sector_t types).
1065 if (unlikely(index != block >> sizebits)) {
1066 char b[BDEVNAME_SIZE];
1068 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1070 __func__, (unsigned long long)block,
1074 block = index << sizebits;
1075 /* Create a page with the proper size buffers.. */
1076 page = grow_dev_page(bdev, block, index, size);
1080 page_cache_release(page);
1084 static struct buffer_head *
1085 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1088 struct buffer_head *bh;
1090 /* Size must be multiple of hard sectorsize */
1091 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1092 (size < 512 || size > PAGE_SIZE))) {
1093 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1095 printk(KERN_ERR "logical block size: %d\n",
1096 bdev_logical_block_size(bdev));
1103 bh = __find_get_block(bdev, block, size);
1107 ret = grow_buffers(bdev, block, size);
1111 } else if (ret > 0) {
1112 bh = __find_get_block(bdev, block, size);
1120 * The relationship between dirty buffers and dirty pages:
1122 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1123 * the page is tagged dirty in its radix tree.
1125 * At all times, the dirtiness of the buffers represents the dirtiness of
1126 * subsections of the page. If the page has buffers, the page dirty bit is
1127 * merely a hint about the true dirty state.
1129 * When a page is set dirty in its entirety, all its buffers are marked dirty
1130 * (if the page has buffers).
1132 * When a buffer is marked dirty, its page is dirtied, but the page's other
1135 * Also. When blockdev buffers are explicitly read with bread(), they
1136 * individually become uptodate. But their backing page remains not
1137 * uptodate - even if all of its buffers are uptodate. A subsequent
1138 * block_read_full_page() against that page will discover all the uptodate
1139 * buffers, will set the page uptodate and will perform no I/O.
1143 * mark_buffer_dirty - mark a buffer_head as needing writeout
1144 * @bh: the buffer_head to mark dirty
1146 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1147 * backing page dirty, then tag the page as dirty in its address_space's radix
1148 * tree and then attach the address_space's inode to its superblock's dirty
1151 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1152 * mapping->tree_lock and mapping->host->i_lock.
1154 void mark_buffer_dirty(struct buffer_head *bh)
1156 WARN_ON_ONCE(!buffer_uptodate(bh));
1159 * Very *carefully* optimize the it-is-already-dirty case.
1161 * Don't let the final "is it dirty" escape to before we
1162 * perhaps modified the buffer.
1164 if (buffer_dirty(bh)) {
1166 if (buffer_dirty(bh))
1170 if (!test_set_buffer_dirty(bh)) {
1171 struct page *page = bh->b_page;
1172 if (!TestSetPageDirty(page)) {
1173 struct address_space *mapping = page_mapping(page);
1175 __set_page_dirty(page, mapping, 0);
1179 EXPORT_SYMBOL(mark_buffer_dirty);
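
/*
 * Illustrative sketch (not part of this file): dirtying a metadata buffer
 * after an in-memory update.  Whether to follow up with sync_dirty_buffer()
 * (synchronous writeout of this one bh) or leave writeback to the flusher
 * threads is filesystem policy; the helper name is hypothetical.
 */
static int example_commit_metadata(struct buffer_head *bh, int sync)
{
	/* ... caller has already modified bh->b_data ... */
	mark_buffer_dirty(bh);	/* dirties the bh, its page and the backing inode */
	if (sync)
		return sync_dirty_buffer(bh);
	return 0;
}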
1182 * Decrement a buffer_head's reference count. If all buffers against a page
1183 * have zero reference count, are clean and unlocked, and if the page is clean
1184 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1185 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1186 * a page but it ends up not being freed, and buffers may later be reattached).
1188 void __brelse(struct buffer_head * buf)
1190 if (atomic_read(&buf->b_count)) {
1194 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1196 EXPORT_SYMBOL(__brelse);
1199 * bforget() is like brelse(), except it discards any
1200 * potentially dirty data.
1202 void __bforget(struct buffer_head *bh)
1204 clear_buffer_dirty(bh);
1205 if (bh->b_assoc_map) {
1206 struct address_space *buffer_mapping = bh->b_page->mapping;
1208 spin_lock(&buffer_mapping->private_lock);
1209 list_del_init(&bh->b_assoc_buffers);
1210 bh->b_assoc_map = NULL;
1211 spin_unlock(&buffer_mapping->private_lock);
1215 EXPORT_SYMBOL(__bforget);
1217 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1220 if (buffer_uptodate(bh)) {
1225 bh->b_end_io = end_buffer_read_sync;
1226 submit_bh(READ, bh);
1228 if (buffer_uptodate(bh))
1236 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1237 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1238 * refcount elevated by one when they're in an LRU. A buffer can only appear
1239 * once in a particular CPU's LRU. A single buffer can be present in multiple
1240 * CPU's LRUs at the same time.
1242 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1243 * sb_find_get_block().
1245 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1246 * a local interrupt disable for that.
1249 #define BH_LRU_SIZE 8
1252 struct buffer_head *bhs[BH_LRU_SIZE];
1255 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1258 #define bh_lru_lock() local_irq_disable()
1259 #define bh_lru_unlock() local_irq_enable()
1261 #define bh_lru_lock() preempt_disable()
1262 #define bh_lru_unlock() preempt_enable()
1265 static inline void check_irqs_on(void)
1267 #ifdef irqs_disabled
1268 BUG_ON(irqs_disabled());
1273 * The LRU management algorithm is dopey-but-simple. Sorry.
1275 static void bh_lru_install(struct buffer_head *bh)
1277 struct buffer_head *evictee = NULL;
1281 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1282 struct buffer_head *bhs[BH_LRU_SIZE];
1288 for (in = 0; in < BH_LRU_SIZE; in++) {
1289 struct buffer_head *bh2 =
1290 __this_cpu_read(bh_lrus.bhs[in]);
1295 if (out >= BH_LRU_SIZE) {
1296 BUG_ON(evictee != NULL);
1303 while (out < BH_LRU_SIZE)
1305 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1314 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1316 static struct buffer_head *
1317 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1319 struct buffer_head *ret = NULL;
1324 for (i = 0; i < BH_LRU_SIZE; i++) {
1325 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1327 if (bh && bh->b_bdev == bdev &&
1328 bh->b_blocknr == block && bh->b_size == size) {
1331 __this_cpu_write(bh_lrus.bhs[i],
1332 __this_cpu_read(bh_lrus.bhs[i - 1]));
1335 __this_cpu_write(bh_lrus.bhs[0], bh);
1347 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1348 * it in the LRU and mark it as accessed. If it is not present then return
1351 struct buffer_head *
1352 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1354 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1357 bh = __find_get_block_slow(bdev, block);
1365 EXPORT_SYMBOL(__find_get_block);
1368 * __getblk will locate (and, if necessary, create) the buffer_head
1369 * which corresponds to the passed block_device, block and size. The
1370 * returned buffer has its reference count incremented.
1372 * __getblk() cannot fail - it just keeps trying. If you pass it an
1373 * illegal block number, __getblk() will happily return a buffer_head
1374 * which represents the non-existent block. Very weird.
1376 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1377 * attempt is failing. FIXME, perhaps?
1379 struct buffer_head *
1380 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1382 struct buffer_head *bh = __find_get_block(bdev, block, size);
1386 bh = __getblk_slow(bdev, block, size);
1389 EXPORT_SYMBOL(__getblk);
1392 * Do async read-ahead on a buffer..
1394 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1396 struct buffer_head *bh = __getblk(bdev, block, size);
1398 ll_rw_block(READA, 1, &bh);
1402 EXPORT_SYMBOL(__breadahead);
1405 * __bread() - reads a specified block and returns the bh
1406 * @bdev: the block_device to read from
1407 * @block: number of block
1408 * @size: size (in bytes) to read
1410 * Reads a specified block, and returns buffer head that contains it.
1411 * It returns NULL if the block was unreadable.
1413 struct buffer_head *
1414 __bread(struct block_device *bdev, sector_t block, unsigned size)
1416 struct buffer_head *bh = __getblk(bdev, block, size);
1418 if (likely(bh) && !buffer_uptodate(bh))
1419 bh = __bread_slow(bh);
1422 EXPORT_SYMBOL(__bread);
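
/*
 * Illustrative sketch (not part of this file): reading one block of
 * metadata with __bread() and releasing it with brelse().  Most
 * filesystems go through the sb_bread() wrapper, which supplies the
 * superblock's block device and block size; the names here are
 * hypothetical.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      unsigned size)
{
	struct buffer_head *bh;

	bh = __bread(bdev, block, size);
	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... examine bh->b_data ... */
	brelse(bh);		/* drop the reference taken by __getblk() */
	return 0;
}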
1425 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1426 * This doesn't race because it runs in each cpu either in irq
1427 * or with preempt disabled.
1429 static void invalidate_bh_lru(void *arg)
1431 struct bh_lru *b = &get_cpu_var(bh_lrus);
1434 for (i = 0; i < BH_LRU_SIZE; i++) {
1438 put_cpu_var(bh_lrus);
1441 void invalidate_bh_lrus(void)
1443 on_each_cpu(invalidate_bh_lru, NULL, 1);
1445 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1447 void set_bh_page(struct buffer_head *bh,
1448 struct page *page, unsigned long offset)
1451 BUG_ON(offset >= PAGE_SIZE);
1452 if (PageHighMem(page))
1454 * This catches illegal uses and preserves the offset:
1456 bh->b_data = (char *)(0 + offset);
1458 bh->b_data = page_address(page) + offset;
1460 EXPORT_SYMBOL(set_bh_page);
1463 * Called when truncating a buffer on a page completely.
1465 static void discard_buffer(struct buffer_head * bh)
1468 clear_buffer_dirty(bh);
1470 clear_buffer_mapped(bh);
1471 clear_buffer_req(bh);
1472 clear_buffer_new(bh);
1473 clear_buffer_delay(bh);
1474 clear_buffer_unwritten(bh);
1479 * block_invalidatepage - invalidate part or all of a buffer-backed page
1481 * @page: the page which is affected
1482 * @offset: the index of the truncation point
1484 * block_invalidatepage() is called when all or part of the page has become
1485 * invalidated by a truncate operation.
1487 * block_invalidatepage() does not have to release all buffers, but it must
1488 * ensure that no dirty buffer is left outside @offset and that no I/O
1489 * is underway against any of the blocks which are outside the truncation
1490 * point. Because the caller is about to free (and possibly reuse) those
1493 void block_invalidatepage(struct page *page, unsigned long offset)
1495 struct buffer_head *head, *bh, *next;
1496 unsigned int curr_off = 0;
1498 BUG_ON(!PageLocked(page));
1499 if (!page_has_buffers(page))
1502 head = page_buffers(page);
1505 unsigned int next_off = curr_off + bh->b_size;
1506 next = bh->b_this_page;
1509 * is this block fully invalidated?
1511 if (offset <= curr_off)
1513 curr_off = next_off;
1515 } while (bh != head);
1518 * We release buffers only if the entire page is being invalidated.
1519 * The get_block cached value has been unconditionally invalidated,
1520 * so real IO is not possible anymore.
1523 try_to_release_page(page, 0);
1527 EXPORT_SYMBOL(block_invalidatepage);
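
/*
 * Illustrative sketch (not part of this file): a simple buffer-backed
 * filesystem can use block_invalidatepage() directly as its
 * ->invalidatepage method, or wrap it as below; the wrapper name is
 * hypothetical.
 */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	block_invalidatepage(page, offset);
}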
1530 * We attach and possibly dirty the buffers atomically wrt
1531 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1532 * is already excluded via the page lock.
1534 void create_empty_buffers(struct page *page,
1535 unsigned long blocksize, unsigned long b_state)
1537 struct buffer_head *bh, *head, *tail;
1539 head = alloc_page_buffers(page, blocksize, 1);
1542 bh->b_state |= b_state;
1544 bh = bh->b_this_page;
1546 tail->b_this_page = head;
1548 spin_lock(&page->mapping->private_lock);
1549 if (PageUptodate(page) || PageDirty(page)) {
1552 if (PageDirty(page))
1553 set_buffer_dirty(bh);
1554 if (PageUptodate(page))
1555 set_buffer_uptodate(bh);
1556 bh = bh->b_this_page;
1557 } while (bh != head);
1559 attach_page_buffers(page, head);
1560 spin_unlock(&page->mapping->private_lock);
1562 EXPORT_SYMBOL(create_empty_buffers);
1565 * We are taking a block for data and we don't want any output from any
1566 * buffer-cache aliases from the moment this function returns until
1567 * the moment when something explicitly marks the buffer dirty
1568 * (hopefully that will not happen until we free that block ;-)
1569 * We don't even need to mark it not-uptodate - nobody can expect
1570 * anything from a newly allocated buffer anyway. We used to use
1571 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1572 * don't want to mark the alias unmapped, for example - it would confuse
1573 * anyone who might pick it with bread() afterwards...
1575 * Also.. Note that bforget() doesn't lock the buffer. So there can
1576 * be writeout I/O going on against recently-freed buffers. We don't
1577 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1578 * only if we really need to. That happens here.
1580 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1582 struct buffer_head *old_bh;
1586 old_bh = __find_get_block_slow(bdev, block);
1588 clear_buffer_dirty(old_bh);
1589 wait_on_buffer(old_bh);
1590 clear_buffer_req(old_bh);
1594 EXPORT_SYMBOL(unmap_underlying_metadata);
1597 * NOTE! All mapped/uptodate combinations are valid:
1599 * Mapped Uptodate Meaning
1601 * No No "unknown" - must do get_block()
1602 * No Yes "hole" - zero-filled
1603 * Yes No "allocated" - allocated on disk, not read in
1604 * Yes Yes "valid" - allocated and up-to-date in memory.
1606 * "Dirty" is valid only with the last case (mapped+uptodate).
1610 * While block_write_full_page is writing back the dirty buffers under
1611 * the page lock, whoever dirtied the buffers may decide to clean them
1612 * again at any time. We handle that by only looking at the buffer
1613 * state inside lock_buffer().
1615 * If block_write_full_page() is called for regular writeback
1616 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1617 * locked buffer. This only can happen if someone has written the buffer
1618 * directly, with submit_bh(). At the address_space level PageWriteback
1619 * prevents this contention from occurring.
1621 * If block_write_full_page() is called with wbc->sync_mode ==
1622 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1623 * causes the writes to be flagged as synchronous writes.
1625 static int __block_write_full_page(struct inode *inode, struct page *page,
1626 get_block_t *get_block, struct writeback_control *wbc,
1627 bh_end_io_t *handler)
1631 sector_t last_block;
1632 struct buffer_head *bh, *head;
1633 const unsigned blocksize = 1 << inode->i_blkbits;
1634 int nr_underway = 0;
1635 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1636 WRITE_SYNC : WRITE);
1638 BUG_ON(!PageLocked(page));
1640 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1642 if (!page_has_buffers(page)) {
1643 create_empty_buffers(page, blocksize,
1644 (1 << BH_Dirty)|(1 << BH_Uptodate));
1648 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1649 * here, and the (potentially unmapped) buffers may become dirty at
1650 * any time. If a buffer becomes dirty here after we've inspected it
1651 * then we just miss that fact, and the page stays dirty.
1653 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1654 * handle that here by just cleaning them.
1657 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1658 head = page_buffers(page);
1662 * Get all the dirty buffers mapped to disk addresses and
1663 * handle any aliases from the underlying blockdev's mapping.
1666 if (block > last_block) {
1668 * mapped buffers outside i_size will occur, because
1669 * this page can be outside i_size when there is a
1670 * truncate in progress.
1673 * The buffer was zeroed by block_write_full_page()
1675 clear_buffer_dirty(bh);
1676 set_buffer_uptodate(bh);
1677 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1679 WARN_ON(bh->b_size != blocksize);
1680 err = get_block(inode, block, bh, 1);
1683 clear_buffer_delay(bh);
1684 if (buffer_new(bh)) {
1685 /* blockdev mappings never come here */
1686 clear_buffer_new(bh);
1687 unmap_underlying_metadata(bh->b_bdev,
1691 bh = bh->b_this_page;
1693 } while (bh != head);
1696 if (!buffer_mapped(bh))
1699 * If it's a fully non-blocking write attempt and we cannot
1700 * lock the buffer then redirty the page. Note that this can
1701 * potentially cause a busy-wait loop from writeback threads
1702 * and kswapd activity, but those code paths have their own
1703 * higher-level throttling.
1705 if (wbc->sync_mode != WB_SYNC_NONE) {
1707 } else if (!trylock_buffer(bh)) {
1708 redirty_page_for_writepage(wbc, page);
1711 if (test_clear_buffer_dirty(bh)) {
1712 mark_buffer_async_write_endio(bh, handler);
1716 } while ((bh = bh->b_this_page) != head);
1719 * The page and its buffers are protected by PageWriteback(), so we can
1720 * drop the bh refcounts early.
1722 BUG_ON(PageWriteback(page));
1723 set_page_writeback(page);
1726 struct buffer_head *next = bh->b_this_page;
1727 if (buffer_async_write(bh)) {
1728 submit_bh(write_op, bh);
1732 } while (bh != head);
1737 if (nr_underway == 0) {
1739 * The page was marked dirty, but the buffers were
1740 * clean. Someone wrote them back by hand with
1741 * ll_rw_block/submit_bh. A rare case.
1743 end_page_writeback(page);
1746 * The page and buffer_heads can be released at any time from
1754 * ENOSPC, or some other error. We may already have added some
1755 * blocks to the file, so we need to write these out to avoid
1756 * exposing stale data.
1757 * The page is currently locked and not marked for writeback
1760 /* Recovery: lock and submit the mapped buffers */
1762 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1763 !buffer_delay(bh)) {
1765 mark_buffer_async_write_endio(bh, handler);
1768 * The buffer may have been set dirty during
1769 * attachment to a dirty page.
1771 clear_buffer_dirty(bh);
1773 } while ((bh = bh->b_this_page) != head);
1775 BUG_ON(PageWriteback(page));
1776 mapping_set_error(page->mapping, err);
1777 set_page_writeback(page);
1779 struct buffer_head *next = bh->b_this_page;
1780 if (buffer_async_write(bh)) {
1781 clear_buffer_dirty(bh);
1782 submit_bh(write_op, bh);
1786 } while (bh != head);
1792 * If a page has any new buffers, zero them out here, and mark them uptodate
1793 * and dirty so they'll be written out (in order to prevent uninitialised
1794 * block data from leaking). And clear the new bit.
1796 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1798 unsigned int block_start, block_end;
1799 struct buffer_head *head, *bh;
1801 BUG_ON(!PageLocked(page));
1802 if (!page_has_buffers(page))
1805 bh = head = page_buffers(page);
1808 block_end = block_start + bh->b_size;
1810 if (buffer_new(bh)) {
1811 if (block_end > from && block_start < to) {
1812 if (!PageUptodate(page)) {
1813 unsigned start, size;
1815 start = max(from, block_start);
1816 size = min(to, block_end) - start;
1818 zero_user(page, start, size);
1819 set_buffer_uptodate(bh);
1822 clear_buffer_new(bh);
1823 mark_buffer_dirty(bh);
1827 block_start = block_end;
1828 bh = bh->b_this_page;
1829 } while (bh != head);
1831 EXPORT_SYMBOL(page_zero_new_buffers);
1833 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1834 get_block_t *get_block)
1836 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1837 unsigned to = from + len;
1838 struct inode *inode = page->mapping->host;
1839 unsigned block_start, block_end;
1842 unsigned blocksize, bbits;
1843 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1845 BUG_ON(!PageLocked(page));
1846 BUG_ON(from > PAGE_CACHE_SIZE);
1847 BUG_ON(to > PAGE_CACHE_SIZE);
1850 blocksize = 1 << inode->i_blkbits;
1851 if (!page_has_buffers(page))
1852 create_empty_buffers(page, blocksize, 0);
1853 head = page_buffers(page);
1855 bbits = inode->i_blkbits;
1856 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1858 for(bh = head, block_start = 0; bh != head || !block_start;
1859 block++, block_start=block_end, bh = bh->b_this_page) {
1860 block_end = block_start + blocksize;
1861 if (block_end <= from || block_start >= to) {
1862 if (PageUptodate(page)) {
1863 if (!buffer_uptodate(bh))
1864 set_buffer_uptodate(bh);
1869 clear_buffer_new(bh);
1870 if (!buffer_mapped(bh)) {
1871 WARN_ON(bh->b_size != blocksize);
1872 err = get_block(inode, block, bh, 1);
1875 if (buffer_new(bh)) {
1876 unmap_underlying_metadata(bh->b_bdev,
1878 if (PageUptodate(page)) {
1879 clear_buffer_new(bh);
1880 set_buffer_uptodate(bh);
1881 mark_buffer_dirty(bh);
1884 if (block_end > to || block_start < from)
1885 zero_user_segments(page,
1891 if (PageUptodate(page)) {
1892 if (!buffer_uptodate(bh))
1893 set_buffer_uptodate(bh);
1896 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1897 !buffer_unwritten(bh) &&
1898 (block_start < from || block_end > to)) {
1899 ll_rw_block(READ, 1, &bh);
1904 * If we issued read requests - let them complete.
1906 while(wait_bh > wait) {
1907 wait_on_buffer(*--wait_bh);
1908 if (!buffer_uptodate(*wait_bh))
1912 page_zero_new_buffers(page, from, to);
1915 EXPORT_SYMBOL(__block_write_begin);
1917 static int __block_commit_write(struct inode *inode, struct page *page,
1918 unsigned from, unsigned to)
1920 unsigned block_start, block_end;
1923 struct buffer_head *bh, *head;
1925 blocksize = 1 << inode->i_blkbits;
1927 for(bh = head = page_buffers(page), block_start = 0;
1928 bh != head || !block_start;
1929 block_start=block_end, bh = bh->b_this_page) {
1930 block_end = block_start + blocksize;
1931 if (block_end <= from || block_start >= to) {
1932 if (!buffer_uptodate(bh))
1935 set_buffer_uptodate(bh);
1936 mark_buffer_dirty(bh);
1938 clear_buffer_new(bh);
1942 * If this is a partial write which happened to make all buffers
1943 * uptodate then we can optimize away a bogus readpage() for
1944 * the next read(). Here we 'discover' whether the page went
1945 * uptodate as a result of this (potentially partial) write.
1948 SetPageUptodate(page);
1953 * block_write_begin takes care of the basic task of block allocation and
1954 * bringing partial write blocks uptodate first.
1956 * The filesystem needs to handle block truncation upon failure.
1958 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1959 unsigned flags, struct page **pagep, get_block_t *get_block)
1961 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1965 page = grab_cache_page_write_begin(mapping, index, flags);
1969 status = __block_write_begin(page, pos, len, get_block);
1970 if (unlikely(status)) {
1972 page_cache_release(page);
1979 EXPORT_SYMBOL(block_write_begin);
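
/*
 * Illustrative sketch (not part of this file): a minimal buffer-backed
 * ->write_begin built on block_write_begin(), paired with the generic
 * ->write_end below.  example_get_block() stands in for the filesystem's
 * real block-mapping routine and is hypothetical; as the comment above
 * notes, a real implementation must also truncate blocks instantiated
 * beyond i_size when write_begin fails.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	/*
	 * A real get_block would look up (and, if create != 0, allocate)
	 * the on-disk block and then call something like:
	 *	map_bh(bh_result, inode->i_sb, physical_block);
	 */
	return 0;
}

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 example_get_block);
}

static const struct address_space_operations example_file_aops = {
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
	/* .readpage, .writepage, ... */
};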
1981 int block_write_end(struct file *file, struct address_space *mapping,
1982 loff_t pos, unsigned len, unsigned copied,
1983 struct page *page, void *fsdata)
1985 struct inode *inode = mapping->host;
1988 start = pos & (PAGE_CACHE_SIZE - 1);
1990 if (unlikely(copied < len)) {
1992 * The buffers that were written will now be uptodate, so we
1993 * don't have to worry about a readpage reading them and
1994 * overwriting a partial write. However if we have encountered
1995 * a short write and only partially written into a buffer, it
1996 * will not be marked uptodate, so a readpage might come in and
1997 * destroy our partial write.
1999 * Do the simplest thing, and just treat any short write to a
2000 * non uptodate page as a zero-length write, and force the
2001 * caller to redo the whole thing.
2003 if (!PageUptodate(page))
2006 page_zero_new_buffers(page, start+copied, start+len);
2008 flush_dcache_page(page);
2010 /* This could be a short (even 0-length) commit */
2011 __block_commit_write(inode, page, start, start+copied);
2015 EXPORT_SYMBOL(block_write_end);
2017 int generic_write_end(struct file *file, struct address_space *mapping,
2018 loff_t pos, unsigned len, unsigned copied,
2019 struct page *page, void *fsdata)
2021 struct inode *inode = mapping->host;
2022 int i_size_changed = 0;
2024 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2027 * No need to use i_size_read() here, the i_size
2028 * cannot change under us because we hold i_mutex.
2030 * But it's important to update i_size while still holding page lock:
2031 * page writeout could otherwise come in and zero beyond i_size.
2033 if (pos+copied > inode->i_size) {
2034 i_size_write(inode, pos+copied);
2039 page_cache_release(page);
2042 * Don't mark the inode dirty under page lock. First, it unnecessarily
2043 * makes the holding time of page lock longer. Second, it forces lock
2044 * ordering of page lock and transaction start for journaling
2048 mark_inode_dirty(inode);
2052 EXPORT_SYMBOL(generic_write_end);
2055 * block_is_partially_uptodate checks whether buffers within a page are
2058 * Returns true if all buffers which correspond to a file portion
2059 * we want to read are uptodate.
2061 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2064 struct inode *inode = page->mapping->host;
2065 unsigned block_start, block_end, blocksize;
2067 struct buffer_head *bh, *head;
2070 if (!page_has_buffers(page))
2073 blocksize = 1 << inode->i_blkbits;
2074 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2076 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2079 head = page_buffers(page);
2083 block_end = block_start + blocksize;
2084 if (block_end > from && block_start < to) {
2085 if (!buffer_uptodate(bh)) {
2089 if (block_end >= to)
2092 block_start = block_end;
2093 bh = bh->b_this_page;
2094 } while (bh != head);
2098 EXPORT_SYMBOL(block_is_partially_uptodate);
2101 * Generic "read page" function for block devices that have the normal
2102 * get_block functionality. This is most of the block device filesystems.
2103 * Reads the page asynchronously --- the unlock_buffer() and
2104 * set/clear_buffer_uptodate() functions propagate buffer state into the
2105 * page struct once IO has completed.
2107 int block_read_full_page(struct page *page, get_block_t *get_block)
2109 struct inode *inode = page->mapping->host;
2110 sector_t iblock, lblock;
2111 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2112 unsigned int blocksize;
2114 int fully_mapped = 1;
2116 BUG_ON(!PageLocked(page));
2117 blocksize = 1 << inode->i_blkbits;
2118 if (!page_has_buffers(page))
2119 create_empty_buffers(page, blocksize, 0);
2120 head = page_buffers(page);
2122 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2123 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2129 if (buffer_uptodate(bh))
2132 if (!buffer_mapped(bh)) {
2136 if (iblock < lblock) {
2137 WARN_ON(bh->b_size != blocksize);
2138 err = get_block(inode, iblock, bh, 0);
2142 if (!buffer_mapped(bh)) {
2143 zero_user(page, i * blocksize, blocksize);
2145 set_buffer_uptodate(bh);
2149 * get_block() might have updated the buffer
2152 if (buffer_uptodate(bh))
2156 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2159 SetPageMappedToDisk(page);
2163 * All buffers are uptodate - we can set the page uptodate
2164 * as well. But not if get_block() returned an error.
2166 if (!PageError(page))
2167 SetPageUptodate(page);
2172 /* Stage two: lock the buffers */
2173 for (i = 0; i < nr; i++) {
2176 mark_buffer_async_read(bh);
2180 * Stage 3: start the IO. Check for uptodateness
2181 * inside the buffer lock in case another process reading
2182 * the underlying blockdev brought it uptodate (the sct fix).
2184 for (i = 0; i < nr; i++) {
2186 if (buffer_uptodate(bh))
2187 end_buffer_async_read(bh, 1);
2189 submit_bh(READ, bh);
2193 EXPORT_SYMBOL(block_read_full_page);
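
/*
 * Illustrative sketch (not part of this file): wiring block_read_full_page()
 * up as a filesystem's ->readpage in the usual one-line style.
 * example_get_block is the hypothetical block-mapping callback from the
 * earlier write_begin sketch.
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}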
2195 /* utility function for filesystems that need to do work on expanding
2196 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2197 * deal with the hole.
2199 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2201 struct address_space *mapping = inode->i_mapping;
2206 err = inode_newsize_ok(inode, size);
2210 err = pagecache_write_begin(NULL, mapping, size, 0,
2211 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2216 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2222 EXPORT_SYMBOL(generic_cont_expand_simple);
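
/*
 * Illustrative sketch (not part of this file): an expanding truncate,
 * e.g. from a filesystem's ->setattr path, using the helper above to let
 * pagecache writes deal with the hole.  The surrounding setattr handling
 * is elided and the function name is hypothetical.
 */
static int example_expand(struct inode *inode, loff_t newsize)
{
	if (newsize > inode->i_size)
		return generic_cont_expand_simple(inode, newsize);
	return 0;
}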
2224 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2225 loff_t pos, loff_t *bytes)
2227 struct inode *inode = mapping->host;
2228 unsigned blocksize = 1 << inode->i_blkbits;
2231 pgoff_t index, curidx;
2233 unsigned zerofrom, offset, len;
2236 index = pos >> PAGE_CACHE_SHIFT;
2237 offset = pos & ~PAGE_CACHE_MASK;
2239 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2240 zerofrom = curpos & ~PAGE_CACHE_MASK;
2241 if (zerofrom & (blocksize-1)) {
2242 *bytes |= (blocksize-1);
2245 len = PAGE_CACHE_SIZE - zerofrom;
2247 err = pagecache_write_begin(file, mapping, curpos, len,
2248 AOP_FLAG_UNINTERRUPTIBLE,
2252 zero_user(page, zerofrom, len);
2253 err = pagecache_write_end(file, mapping, curpos, len, len,
2260 balance_dirty_pages_ratelimited(mapping);
2263 /* page covers the boundary, find the boundary offset */
2264 if (index == curidx) {
2265 zerofrom = curpos & ~PAGE_CACHE_MASK;
2266 /* if we will expand the thing last block will be filled */
2267 if (offset <= zerofrom) {
2270 if (zerofrom & (blocksize-1)) {
2271 *bytes |= (blocksize-1);
2274 len = offset - zerofrom;
2276 err = pagecache_write_begin(file, mapping, curpos, len,
2277 AOP_FLAG_UNINTERRUPTIBLE,
2281 zero_user(page, zerofrom, len);
2282 err = pagecache_write_end(file, mapping, curpos, len, len,
2294 * For moronic filesystems that do not allow holes in a file.
2295 * We may have to extend the file.
2297 int cont_write_begin(struct file *file, struct address_space *mapping,
2298 loff_t pos, unsigned len, unsigned flags,
2299 struct page **pagep, void **fsdata,
2300 get_block_t *get_block, loff_t *bytes)
2302 struct inode *inode = mapping->host;
2303 unsigned blocksize = 1 << inode->i_blkbits;
2307 err = cont_expand_zero(file, mapping, pos, bytes);
2311 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2312 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2313 *bytes |= (blocksize-1);
2317 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2319 EXPORT_SYMBOL(cont_write_begin);
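/*
 * Illustrative sketch (not part of the original file): a hole-less
 * (FAT-like) filesystem wires cont_write_begin() into its ->write_begin(),
 * passing a per-inode watermark as the @bytes argument.  All names
 * prefixed "example_" (and the i_zeroed_up_to field, a loff_t tracking
 * how far the file has been zeroed) are hypothetical.
 */
#if 0
static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;

	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				example_get_block,
				&EXAMPLE_I(inode)->i_zeroed_up_to);
}
#endif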
2321 int block_commit_write(struct page *page, unsigned from, unsigned to)
2323 struct inode *inode = page->mapping->host;
2324 __block_commit_write(inode,page,from,to);
2327 EXPORT_SYMBOL(block_commit_write);
2330 * block_page_mkwrite() is not allowed to change the file size as it gets
2331 * called from a page fault handler when a page is first dirtied. Hence we must
2332 * be careful to check for EOF conditions here. We set the page up correctly
2333 * for a written page which means we get ENOSPC checking when writing into
2334 * holes and correct delalloc and unwritten extent mapping on filesystems that
2335 * support these features.
2337 * We are not allowed to take the i_mutex here so we have to play games to
2338 * protect against truncate races as the page could now be beyond EOF. Because
2339 * truncate writes the inode size before removing pages, once we have the
2340 * page lock we can determine safely if the page is beyond EOF. If it is not
2341 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2344 * Direct callers of this function should call vfs_check_frozen() so that page
2345 * fault does not busyloop until the fs is thawed.
2347 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2348 get_block_t get_block)
2350 struct page *page = vmf->page;
2351 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2357 size = i_size_read(inode);
2358 if ((page->mapping != inode->i_mapping) ||
2359 (page_offset(page) > size)) {
2360 /* We overload EFAULT to mean page got truncated */
2365 /* page is wholly or partially inside EOF */
2366 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2367 end = size & ~PAGE_CACHE_MASK;
2369 end = PAGE_CACHE_SIZE;
2371 ret = __block_write_begin(page, 0, end, get_block);
2373 ret = block_commit_write(page, 0, end);
2375 if (unlikely(ret < 0))
2378 * Freezing in progress? We check after the page is marked dirty and
2379 * with page lock held so if the test here fails, we are sure freezing
2380 * code will wait during syncing until the page fault is done - at that
2381 * point page will be dirty and unlocked so freezing code will write it
2382 * and writeprotect it again.
2384 set_page_dirty(page);
2385 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2389 wait_on_page_writeback(page);
2395 EXPORT_SYMBOL(__block_page_mkwrite);
2397 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2398 get_block_t get_block)
2401 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2404 * This check is racy but catches the common case. The check in
2405 * __block_page_mkwrite() is reliable.
2407 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2408 ret = __block_page_mkwrite(vma, vmf, get_block);
2409 return block_page_mkwrite_return(ret);
2411 EXPORT_SYMBOL(block_page_mkwrite);
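/*
 * Illustrative sketch (not part of the original file): for filesystems
 * with no special ->page_mkwrite() needs, the helper can be wired into
 * the vm_operations_struct almost directly.  "example_page_mkwrite" and
 * "example_get_block" are hypothetical.
 */
#if 0
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, example_get_block);
}

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
#endif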
2414 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2415 * immediately, while under the page lock. So it needs a special end_io
2416 * handler which does not touch the bh after unlocking it.
2418 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2420 __end_buffer_read_notouch(bh, uptodate);
2424 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2425 * the page (converting it to a circular linked list and taking care of page dirty races).
2428 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2430 struct buffer_head *bh;
2432 BUG_ON(!PageLocked(page));
2434 spin_lock(&page->mapping->private_lock);
2437 if (PageDirty(page))
2438 set_buffer_dirty(bh);
2439 if (!bh->b_this_page)
2440 bh->b_this_page = head;
2441 bh = bh->b_this_page;
2442 } while (bh != head);
2443 attach_page_buffers(page, head);
2444 spin_unlock(&page->mapping->private_lock);
2448 * On entry, the page is fully not uptodate.
2449 * On exit the page is fully uptodate in the areas outside (from,to).
2450 * The filesystem needs to handle block truncation upon failure.
2452 int nobh_write_begin(struct address_space *mapping,
2453 loff_t pos, unsigned len, unsigned flags,
2454 struct page **pagep, void **fsdata,
2455 get_block_t *get_block)
2457 struct inode *inode = mapping->host;
2458 const unsigned blkbits = inode->i_blkbits;
2459 const unsigned blocksize = 1 << blkbits;
2460 struct buffer_head *head, *bh;
2464 unsigned block_in_page;
2465 unsigned block_start, block_end;
2466 sector_t block_in_file;
2469 int is_mapped_to_disk = 1;
2471 index = pos >> PAGE_CACHE_SHIFT;
2472 from = pos & (PAGE_CACHE_SIZE - 1);
2475 page = grab_cache_page_write_begin(mapping, index, flags);
2481 if (page_has_buffers(page)) {
2482 ret = __block_write_begin(page, pos, len, get_block);
2488 if (PageMappedToDisk(page))
2492 * Allocate buffers so that we can keep track of state, and potentially
2493 * attach them to the page if an error occurs. In the common case of
2494 * no error, they will just be freed again without ever being attached
2495 * to the page (which is all OK, because we're under the page lock).
2497 * Be careful: the buffer linked list is a NULL terminated one, rather
2498 * than the circular one we're used to.
2500 head = alloc_page_buffers(page, blocksize, 0);
2506 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2509 * We loop across all blocks in the page, whether or not they are
2510 * part of the affected region. This is so we can discover if the
2511 * page is fully mapped-to-disk.
2513 for (block_start = 0, block_in_page = 0, bh = head;
2514 block_start < PAGE_CACHE_SIZE;
2515 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2518 block_end = block_start + blocksize;
2521 if (block_start >= to)
2523 ret = get_block(inode, block_in_file + block_in_page,
2527 if (!buffer_mapped(bh))
2528 is_mapped_to_disk = 0;
2530 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2531 if (PageUptodate(page)) {
2532 set_buffer_uptodate(bh);
2535 if (buffer_new(bh) || !buffer_mapped(bh)) {
2536 zero_user_segments(page, block_start, from,
2540 if (buffer_uptodate(bh))
2541 continue; /* reiserfs does this */
2542 if (block_start < from || block_end > to) {
2544 bh->b_end_io = end_buffer_read_nobh;
2545 submit_bh(READ, bh);
2552 * The page is locked, so these buffers are protected from
2553 * any VM or truncate activity. Hence we don't need to care
2554 * for the buffer_head refcounts.
2556 for (bh = head; bh; bh = bh->b_this_page) {
2558 if (!buffer_uptodate(bh))
2565 if (is_mapped_to_disk)
2566 SetPageMappedToDisk(page);
2568 *fsdata = head; /* to be released by nobh_write_end */
2575 * Error recovery is a bit difficult. We need to zero out blocks that
2576 * were newly allocated, and dirty them to ensure they get written out.
2577 * Buffers need to be attached to the page at this point, otherwise
2578 * the handling of potential IO errors during writeout would be hard
2579 * (could try doing synchronous writeout, but what if that fails too?)
2581 attach_nobh_buffers(page, head);
2582 page_zero_new_buffers(page, from, to);
2586 page_cache_release(page);
2591 EXPORT_SYMBOL(nobh_write_begin);
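/*
 * Illustrative sketch (not part of the original file): a filesystem opting
 * for the nobh path wraps nobh_write_begin() so its own get_block is used;
 * nobh_write_end() can then be installed as ->write_end directly.
 * "example_nobh_write_begin" and "example_get_block" are hypothetical.
 */
#if 0
static int example_nobh_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				example_get_block);
}
#endif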
2593 int nobh_write_end(struct file *file, struct address_space *mapping,
2594 loff_t pos, unsigned len, unsigned copied,
2595 struct page *page, void *fsdata)
2597 struct inode *inode = page->mapping->host;
2598 struct buffer_head *head = fsdata;
2599 struct buffer_head *bh;
2600 BUG_ON(fsdata != NULL && page_has_buffers(page));
2602 if (unlikely(copied < len) && head)
2603 attach_nobh_buffers(page, head);
2604 if (page_has_buffers(page))
2605 return generic_write_end(file, mapping, pos, len,
2606 copied, page, fsdata);
2608 SetPageUptodate(page);
2609 set_page_dirty(page);
2610 if (pos+copied > inode->i_size) {
2611 i_size_write(inode, pos+copied);
2612 mark_inode_dirty(inode);
2616 page_cache_release(page);
2620 head = head->b_this_page;
2621 free_buffer_head(bh);
2626 EXPORT_SYMBOL(nobh_write_end);
2629 * nobh_writepage() - based on block_write_full_page() except
2630 * that it tries to operate without attaching bufferheads to the page.
2633 int nobh_writepage(struct page *page, get_block_t *get_block,
2634 struct writeback_control *wbc)
2636 struct inode * const inode = page->mapping->host;
2637 loff_t i_size = i_size_read(inode);
2638 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2642 /* Is the page fully inside i_size? */
2643 if (page->index < end_index)
2646 /* Is the page fully outside i_size? (truncate in progress) */
2647 offset = i_size & (PAGE_CACHE_SIZE-1);
2648 if (page->index >= end_index+1 || !offset) {
2650 * The page may have dirty, unmapped buffers. For example,
2651 * they may have been added in ext3_writepage(). Make them
2652 * freeable here, so the page does not leak.
2655 /* Not really sure about this - do we need this? */
2656 if (page->mapping->a_ops->invalidatepage)
2657 page->mapping->a_ops->invalidatepage(page, offset);
2660 return 0; /* don't care */
2664 * The page straddles i_size. It must be zeroed out on each and every
2665 * writepage invocation because it may be mmapped. "A file is mapped
2666 * in multiples of the page size. For a file that is not a multiple of
2667 * the page size, the remaining memory is zeroed when mapped, and
2668 * writes to that region are not written out to the file."
2670 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2672 ret = mpage_writepage(page, get_block, wbc);
2674 ret = __block_write_full_page(inode, page, get_block, wbc,
2675 end_buffer_async_write);
2678 EXPORT_SYMBOL(nobh_writepage);
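/*
 * Illustrative sketch (not part of the original file): putting the nobh
 * helpers together in an address_space_operations table, in the style a
 * filesystem such as ext2 used to, reusing the hypothetical "example_*"
 * wrappers sketched above.  "example_nobh_writepage" would simply pass
 * the filesystem's get_block to nobh_writepage(); "example_bmap" would
 * wrap generic_block_bmap().
 */
#if 0
static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.writepage	= example_nobh_writepage,
	.write_begin	= example_nobh_write_begin,
	.write_end	= nobh_write_end,
	.bmap		= example_bmap,
};
#endif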
2680 int nobh_truncate_page(struct address_space *mapping,
2681 loff_t from, get_block_t *get_block)
2683 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2684 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2687 unsigned length, pos;
2688 struct inode *inode = mapping->host;
2690 struct buffer_head map_bh;
2693 blocksize = 1 << inode->i_blkbits;
2694 length = offset & (blocksize - 1);
2696 /* Block boundary? Nothing to do */
2700 length = blocksize - length;
2701 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2703 page = grab_cache_page(mapping, index);
2708 if (page_has_buffers(page)) {
2711 page_cache_release(page);
2712 return block_truncate_page(mapping, from, get_block);
2715 /* Find the buffer that contains "offset" */
2717 while (offset >= pos) {
2722 map_bh.b_size = blocksize;
2724 err = get_block(inode, iblock, &map_bh, 0);
2727 /* unmapped? It's a hole - nothing to do */
2728 if (!buffer_mapped(&map_bh))
2731 /* Ok, it's mapped. Make sure it's up-to-date */
2732 if (!PageUptodate(page)) {
2733 err = mapping->a_ops->readpage(NULL, page);
2735 page_cache_release(page);
2739 if (!PageUptodate(page)) {
2743 if (page_has_buffers(page))
2746 zero_user(page, offset, length);
2747 set_page_dirty(page);
2752 page_cache_release(page);
2756 EXPORT_SYMBOL(nobh_truncate_page);
2758 int block_truncate_page(struct address_space *mapping,
2759 loff_t from, get_block_t *get_block)
2761 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2762 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2765 unsigned length, pos;
2766 struct inode *inode = mapping->host;
2768 struct buffer_head *bh;
2771 blocksize = 1 << inode->i_blkbits;
2772 length = offset & (blocksize - 1);
2774 /* Block boundary? Nothing to do */
2778 length = blocksize - length;
2779 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2781 page = grab_cache_page(mapping, index);
2786 if (!page_has_buffers(page))
2787 create_empty_buffers(page, blocksize, 0);
2789 /* Find the buffer that contains "offset" */
2790 bh = page_buffers(page);
2792 while (offset >= pos) {
2793 bh = bh->b_this_page;
2799 if (!buffer_mapped(bh)) {
2800 WARN_ON(bh->b_size != blocksize);
2801 err = get_block(inode, iblock, bh, 0);
2804 /* unmapped? It's a hole - nothing to do */
2805 if (!buffer_mapped(bh))
2809 /* Ok, it's mapped. Make sure it's up-to-date */
2810 if (PageUptodate(page))
2811 set_buffer_uptodate(bh);
2813 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2815 ll_rw_block(READ, 1, &bh);
2817 /* Uhhuh. Read error. Complain and punt. */
2818 if (!buffer_uptodate(bh))
2822 zero_user(page, offset, length);
2823 mark_buffer_dirty(bh);
2828 page_cache_release(page);
2832 EXPORT_SYMBOL(block_truncate_page);
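/*
 * Illustrative sketch (not part of the original file): on a truncate that
 * leaves a partial final block, the filesystem zeroes the tail of that
 * block through block_truncate_page() before shrinking its block mapping.
 * "example_truncate" and "example_get_block" are hypothetical.
 */
#if 0
static void example_truncate(struct inode *inode)
{
	/* zero the bytes beyond the new EOF within the last block */
	block_truncate_page(inode->i_mapping, inode->i_size,
			    example_get_block);
	/* ... then release the blocks beyond i_size in the fs metadata ... */
}
#endif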
2835 * The generic ->writepage function for buffer-backed address_spaces;
2836 * this form passes in the end_io handler used to finish the IO.
2838 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2839 struct writeback_control *wbc, bh_end_io_t *handler)
2841 struct inode * const inode = page->mapping->host;
2842 loff_t i_size = i_size_read(inode);
2843 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2846 /* Is the page fully inside i_size? */
2847 if (page->index < end_index)
2848 return __block_write_full_page(inode, page, get_block, wbc,
2851 /* Is the page fully outside i_size? (truncate in progress) */
2852 offset = i_size & (PAGE_CACHE_SIZE-1);
2853 if (page->index >= end_index+1 || !offset) {
2855 * The page may have dirty, unmapped buffers. For example,
2856 * they may have been added in ext3_writepage(). Make them
2857 * freeable here, so the page does not leak.
2859 do_invalidatepage(page, 0);
2861 return 0; /* don't care */
2865 * The page straddles i_size. It must be zeroed out on each and every
2866 * writepage invocation because it may be mmapped. "A file is mapped
2867 * in multiples of the page size. For a file that is not a multiple of
2868 * the page size, the remaining memory is zeroed when mapped, and
2869 * writes to that region are not written out to the file."
2871 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2872 return __block_write_full_page(inode, page, get_block, wbc, handler);
2874 EXPORT_SYMBOL(block_write_full_page_endio);
2877 * The generic ->writepage function for buffer-backed address_spaces
2879 int block_write_full_page(struct page *page, get_block_t *get_block,
2880 struct writeback_control *wbc)
2882 return block_write_full_page_endio(page, get_block, wbc,
2883 end_buffer_async_write);
2885 EXPORT_SYMBOL(block_write_full_page);
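/*
 * Illustrative sketch (not part of the original file): the common
 * ->writepage() for a buffer-backed filesystem is a one-liner around
 * block_write_full_page().  "example_writepage" is hypothetical.
 */
#if 0
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}
#endif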
2887 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2888 get_block_t *get_block)
2890 struct buffer_head tmp;
2891 struct inode *inode = mapping->host;
2894 tmp.b_size = 1 << inode->i_blkbits;
2895 get_block(inode, block, &tmp, 0);
2896 return tmp.b_blocknr;
2898 EXPORT_SYMBOL(generic_block_bmap);
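/*
 * Illustrative sketch (not part of the original file): ->bmap(), as used
 * by the FIBMAP ioctl and swap-file setup, is usually just a thin wrapper
 * around generic_block_bmap().  "example_bmap" is hypothetical.
 */
#if 0
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}
#endif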
2900 static void end_bio_bh_io_sync(struct bio *bio, int err)
2902 struct buffer_head *bh = bio->bi_private;
2904 if (err == -EOPNOTSUPP) {
2905 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2908 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2909 set_bit(BH_Quiet, &bh->b_state);
2911 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2915 int submit_bh(int rw, struct buffer_head * bh)
2920 BUG_ON(!buffer_locked(bh));
2921 BUG_ON(!buffer_mapped(bh));
2922 BUG_ON(!bh->b_end_io);
2923 BUG_ON(buffer_delay(bh));
2924 BUG_ON(buffer_unwritten(bh));
2927 * Only clear out a write error when rewriting
2929 if (test_set_buffer_req(bh) && (rw & WRITE))
2930 clear_buffer_write_io_error(bh);
2933 * from here on down, it's all bio -- do the initial mapping,
2934 * submit_bio -> generic_make_request may further map this bio around
2936 bio = bio_alloc(GFP_NOIO, 1);
2938 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2939 bio->bi_bdev = bh->b_bdev;
2940 bio->bi_io_vec[0].bv_page = bh->b_page;
2941 bio->bi_io_vec[0].bv_len = bh->b_size;
2942 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2946 bio->bi_size = bh->b_size;
2948 bio->bi_end_io = end_bio_bh_io_sync;
2949 bio->bi_private = bh;
2952 submit_bio(rw, bio);
2954 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2960 EXPORT_SYMBOL(submit_bh);
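/*
 * Illustrative sketch (not part of the original file): reading a single
 * metadata buffer synchronously with submit_bh(), along the same lines as
 * the slow path of __bread().  The caller must already hold a reference;
 * "example_read_bh" is hypothetical.
 */
#if 0
static int example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* reference dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif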
2963 * ll_rw_block: low-level access to block devices (DEPRECATED)
2964 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2965 * @nr: number of &struct buffer_heads in the array
2966 * @bhs: array of pointers to &struct buffer_head
2968 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2969 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2970 * %READA option is described in the documentation for generic_make_request()
2971 * which ll_rw_block() calls.
2973 * This function drops any buffer that it cannot get a lock on (with the
2974 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2975 * request, and any buffer that appears to be up-to-date when doing a read
2976 * request. Further it marks as clean buffers that are processed for
2977 * writing (the buffer cache won't assume that they are actually clean
2978 * until the buffer gets unlocked).
2980 * ll_rw_block sets b_end_io to a simple completion handler that marks
2981 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes up any waiters.
2984 * All of the buffers must be for the same device, and must also be a
2985 * multiple of the current approved size for the device.
2987 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2991 for (i = 0; i < nr; i++) {
2992 struct buffer_head *bh = bhs[i];
2994 if (!trylock_buffer(bh))
2997 if (test_clear_buffer_dirty(bh)) {
2998 bh->b_end_io = end_buffer_write_sync;
3000 submit_bh(WRITE, bh);
3004 if (!buffer_uptodate(bh)) {
3005 bh->b_end_io = end_buffer_read_sync;
3014 EXPORT_SYMBOL(ll_rw_block);
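/*
 * Illustrative sketch (not part of the original file): opportunistic
 * readahead of a run of metadata blocks with ll_rw_block().  Buffers that
 * are already uptodate or locked are simply skipped, so this is safe to
 * call speculatively.  "example_readahead_blocks" is hypothetical.
 */
#if 0
static void example_readahead_blocks(struct super_block *sb,
				     sector_t first, int nr)
{
	struct buffer_head *bh;
	int i;

	for (i = 0; i < nr; i++) {
		bh = sb_getblk(sb, first + i);
		if (!bh)
			continue;
		if (!buffer_uptodate(bh))
			ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
#endif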
3016 void write_dirty_buffer(struct buffer_head *bh, int rw)
3019 if (!test_clear_buffer_dirty(bh)) {
3023 bh->b_end_io = end_buffer_write_sync;
3027 EXPORT_SYMBOL(write_dirty_buffer);
3030 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3031 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer.
3034 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3038 WARN_ON(atomic_read(&bh->b_count) < 1);
3040 if (test_clear_buffer_dirty(bh)) {
3042 bh->b_end_io = end_buffer_write_sync;
3043 ret = submit_bh(rw, bh);
3045 if (!ret && !buffer_uptodate(bh))
3052 EXPORT_SYMBOL(__sync_dirty_buffer);
3054 int sync_dirty_buffer(struct buffer_head *bh)
3056 return __sync_dirty_buffer(bh, WRITE_SYNC);
3058 EXPORT_SYMBOL(sync_dirty_buffer);
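/*
 * Illustrative sketch (not part of the original file): updating an on-disk
 * superblock copy and forcing it out synchronously with sync_dirty_buffer(),
 * the usual pattern for small critical metadata.  "example_commit_super"
 * is hypothetical.
 */
#if 0
static int example_commit_super(struct super_block *sb,
				struct buffer_head *sbh)
{
	/* ... copy the in-core superblock fields into sbh->b_data ... */
	mark_buffer_dirty(sbh);
	sync_dirty_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		clear_buffer_write_io_error(sbh);
		return -EIO;
	}
	return 0;
}
#endif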
3061 * try_to_free_buffers() checks if all the buffers on this particular page
3062 * are unused, and releases them if so.
3064 * Exclusion against try_to_free_buffers may be obtained by either
3065 * locking the page or by holding its mapping's private_lock.
3067 * If the page is dirty but all the buffers are clean then we need to
3068 * be sure to mark the page clean as well. This is because the page
3069 * may be against a block device, and a later reattachment of buffers
3070 * to a dirty page will set *all* buffers dirty, which would corrupt
3071 * filesystem data on the same device.
3073 * The same applies to regular filesystem pages: if all the buffers are
3074 * clean then we set the page clean and proceed. To do that, we require
3075 * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
3078 * try_to_free_buffers() is non-blocking.
3080 static inline int buffer_busy(struct buffer_head *bh)
3082 return atomic_read(&bh->b_count) |
3083 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3087 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3089 struct buffer_head *head = page_buffers(page);
3090 struct buffer_head *bh;
3094 if (buffer_write_io_error(bh) && page->mapping)
3095 set_bit(AS_EIO, &page->mapping->flags);
3096 if (buffer_busy(bh))
3098 bh = bh->b_this_page;
3099 } while (bh != head);
3102 struct buffer_head *next = bh->b_this_page;
3104 if (bh->b_assoc_map)
3105 __remove_assoc_queue(bh);
3107 } while (bh != head);
3108 *buffers_to_free = head;
3109 __clear_page_buffers(page);
3115 int try_to_free_buffers(struct page *page)
3117 struct address_space * const mapping = page->mapping;
3118 struct buffer_head *buffers_to_free = NULL;
3121 BUG_ON(!PageLocked(page));
3122 if (PageWriteback(page))
3125 if (mapping == NULL) { /* can this still happen? */
3126 ret = drop_buffers(page, &buffers_to_free);
3130 spin_lock(&mapping->private_lock);
3131 ret = drop_buffers(page, &buffers_to_free);
3134 * If the filesystem writes its buffers by hand (eg ext3)
3135 * then we can have clean buffers against a dirty page. We
3136 * clean the page here; otherwise the VM will never notice
3137 * that the filesystem did any IO at all.
3139 * Also, during truncate, discard_buffer will have marked all
3140 * the page's buffers clean. We discover that here and clean the page also.
3143 * private_lock must be held over this entire operation in order
3144 * to synchronise against __set_page_dirty_buffers and prevent the
3145 * dirty bit from being lost.
3148 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3149 spin_unlock(&mapping->private_lock);
3151 if (buffers_to_free) {
3152 struct buffer_head *bh = buffers_to_free;
3155 struct buffer_head *next = bh->b_this_page;
3156 free_buffer_head(bh);
3158 } while (bh != buffers_to_free);
3162 EXPORT_SYMBOL(try_to_free_buffers);
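/*
 * Illustrative sketch (not part of the original file): a filesystem
 * ->releasepage() typically performs its own checks (journal references,
 * pinned private state) and then falls back to try_to_free_buffers().
 * "example_releasepage" and "example_page_is_pinned" are hypothetical.
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!page_has_buffers(page))
		return 0;
	/* hypothetical: refuse while the fs still holds private references */
	if (example_page_is_pinned(page))
		return 0;
	return try_to_free_buffers(page);
}
#endif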
3165 * There are no bdflush tunables left. But distributions are
3166 * still running obsolete flush daemons, so we terminate them here.
3168 * Use of bdflush() is deprecated and will be removed in a future kernel.
3169 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3171 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3173 static int msg_count;
3175 if (!capable(CAP_SYS_ADMIN))
3178 if (msg_count < 5) {
3181 "warning: process `%s' used the obsolete bdflush"
3182 " system call\n", current->comm);
3183 printk(KERN_INFO "Fix your initscripts?\n");
3192 * Buffer-head allocation
3194 static struct kmem_cache *bh_cachep;
3197 * Once the number of bh's in the machine exceeds this level, we start
3198 * stripping them in writeback.
3200 static int max_buffer_heads;
3202 int buffer_heads_over_limit;
3204 struct bh_accounting {
3205 int nr; /* Number of live bh's */
3206 int ratelimit; /* Limit cacheline bouncing */
3209 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3211 static void recalc_bh_state(void)
3216 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3218 __this_cpu_write(bh_accounting.ratelimit, 0);
3219 for_each_online_cpu(i)
3220 tot += per_cpu(bh_accounting, i).nr;
3221 buffer_heads_over_limit = (tot > max_buffer_heads);
3224 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3226 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3228 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3230 __this_cpu_inc(bh_accounting.nr);
3236 EXPORT_SYMBOL(alloc_buffer_head);
3238 void free_buffer_head(struct buffer_head *bh)
3240 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3241 kmem_cache_free(bh_cachep, bh);
3243 __this_cpu_dec(bh_accounting.nr);
3247 EXPORT_SYMBOL(free_buffer_head);
3249 static void buffer_exit_cpu(int cpu)
3252 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3254 for (i = 0; i < BH_LRU_SIZE; i++) {
3258 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3259 per_cpu(bh_accounting, cpu).nr = 0;
3262 static int buffer_cpu_notify(struct notifier_block *self,
3263 unsigned long action, void *hcpu)
3265 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3266 buffer_exit_cpu((unsigned long)hcpu);
3271 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3272 * @bh: struct buffer_head
3274 * Return true if the buffer is up-to-date and false,
3275 * with the buffer locked, if not.
3277 int bh_uptodate_or_lock(struct buffer_head *bh)
3279 if (!buffer_uptodate(bh)) {
3281 if (!buffer_uptodate(bh))
3287 EXPORT_SYMBOL(bh_uptodate_or_lock);
3290 * bh_submit_read - Submit a locked buffer for reading
3291 * @bh: struct buffer_head
3293 * Returns zero on success and -EIO on error.
3295 int bh_submit_read(struct buffer_head *bh)
3297 BUG_ON(!buffer_locked(bh));
3299 if (buffer_uptodate(bh)) {
3305 bh->b_end_io = end_buffer_read_sync;
3306 submit_bh(READ, bh);
3308 if (buffer_uptodate(bh))
3312 EXPORT_SYMBOL(bh_submit_read);
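/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of bh_uptodate_or_lock() and bh_submit_read() when reading a metadata
 * buffer that may already be cached.  "example_read_block" is hypothetical.
 */
#if 0
static struct buffer_head *example_read_block(struct super_block *sb,
					      sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (!bh)
		return NULL;
	/* returns with the buffer locked if it still needs reading */
	if (!bh_uptodate_or_lock(bh) && bh_submit_read(bh) < 0) {
		brelse(bh);
		return NULL;
	}
	return bh;
}
#endif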
3314 void __init buffer_init(void)
3318 bh_cachep = kmem_cache_create("buffer_head",
3319 sizeof(struct buffer_head), 0,
3320 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3325 * Limit the bh occupancy to 10% of ZONE_NORMAL
3327 nrpages = (nr_free_buffer_pages() * 10) / 100;
3328 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3329 hotcpu_notifier(buffer_cpu_notify, 0);