/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"
/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
                                  int sync, void *_key)
{
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

        ASSERT(key);

        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
               key->flags, key->bit_nr);

        /* ignore wakeups for anything other than the lock bit of the page
         * we're monitoring */
        if (key->flags != &page->flags ||
            key->bit_nr != PG_locked)
                return 0;

        _debug("--- monitor %p %lx ---", page, page->flags);

        if (!PageUptodate(page) && !PageError(page)) {
                /* unlocked, not uptodate and not erroneous? */
                _debug("page probably truncated");
        }

        /* remove from the waitqueue */
        list_del(&wait->task_list);

        /* move onto the action list and queue for FS-Cache thread pool */
        ASSERT(monitor->op);

        object = container_of(monitor->op->op.object,
                              struct cachefiles_object, fscache);

        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &monitor->op->to_do);
        spin_unlock(&object->work_lock);

        fscache_enqueue_retrieval(monitor->op);
        return 0;
}
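
/*
 * For orientation (a sketch, not part of the driver logic): the wakeup that
 * reaches the callback above originates in the VM when the backing page read
 * completes and the page is unlocked, roughly:
 *
 *      unlock_page(backpage)
 *        -> wake_up_page(backpage, PG_locked)
 *             -> __wake_up_bit() with a wait_bit_key of
 *                { .flags = &backpage->flags, .bit_nr = PG_locked }
 *
 * which is why cachefiles_read_waiter() filters on exactly that key and
 * returns 0 (leaving itself queued) for wakeups aimed at any other bit.
 */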
/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
                                   struct cachefiles_one_read *monitor)
{
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *backpage = monitor->back_page, *backpage2;
        int ret;

        _enter("{ino=%lx},{%lx,%lx}",
               object->backer->d_inode->i_ino,
               backpage->index, backpage->flags);

        /* skip if the page was truncated away completely */
        if (backpage->mapping != bmapping) {
                _leave(" = -ENODATA [mapping]");
                return -ENODATA;
        }

        backpage2 = find_get_page(bmapping, backpage->index);
        if (!backpage2) {
                _leave(" = -ENODATA [gone]");
                return -ENODATA;
        }

        if (backpage != backpage2) {
                page_cache_release(backpage2);
                _leave(" = -ENODATA [different]");
                return -ENODATA;
        }

        /* the page is still there and we already have a ref on it, so we don't
         * need a second */
        page_cache_release(backpage2);

        INIT_LIST_HEAD(&monitor->op_link);
        add_page_wait_queue(backpage, &monitor->monitor);

        if (trylock_page(backpage)) {
                ret = -EIO;
                if (PageError(backpage))
                        goto unlock_discard;
                ret = 0;
                if (PageUptodate(backpage))
                        goto unlock_discard;

                _debug("reissue read");
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto unlock_discard;
        }

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }

        /* it'll reappear on the todo list */
        _leave(" = -EINPROGRESS");
        return -EINPROGRESS;

unlock_discard:
        unlock_page(backpage);
        spin_lock_irq(&object->work_lock);
        list_del(&monitor->op_link);
        spin_unlock_irq(&object->work_lock);
        _leave(" = %d", ret);
        return ret;
}
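
/*
 * Illustrative timeline of the truncation race that the reissue path above
 * exists to handle (a sketch, not additional driver code):
 *
 *      reader                          truncater
 *      ======                          =========
 *      ->readpage() starts I/O
 *                                      truncate removes page from mapping
 *      page unlocked without
 *        PG_uptodate or PG_error set
 *      waiter fires, copier runs
 *      cachefiles_read_reissue()
 *        -> page gone: -ENODATA, or
 *        -> page still there: re-add the monitor and reissue ->readpage()
 */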
/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
        struct pagevec pagevec;
        int error, max;

        op = container_of(_op, struct fscache_retrieval, op);
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("{ino=%lu}", object->backer->d_inode->i_ino);

        pagevec_init(&pagevec, 0);

        max = 8;
        spin_lock_irq(&object->work_lock);

        while (!list_empty(&op->to_do)) {
                monitor = list_entry(op->to_do.next,
                                     struct cachefiles_one_read, op_link);
                list_del(&monitor->op_link);

                spin_unlock_irq(&object->work_lock);

                _debug("- copy {%lu}", monitor->back_page->index);

        recheck:
                if (test_bit(FSCACHE_COOKIE_INVALIDATING,
                             &object->fscache.cookie->flags)) {
                        error = -ESTALE;
                } else if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);
                        fscache_mark_page_cached(monitor->op,
                                                 monitor->netfs_page);
                        error = 0;
                } else if (!PageError(monitor->back_page)) {
                        /* the page has probably been truncated */
                        error = cachefiles_read_reissue(object, monitor);
                        if (error == -EINPROGRESS)
                                goto next;
                        goto recheck;
                } else {
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);
                        error = -EIO;
                }

                page_cache_release(monitor->back_page);

                fscache_end_io(op, monitor->netfs_page, error);
                page_cache_release(monitor->netfs_page);
                fscache_retrieval_complete(op, 1);
                fscache_put_retrieval(op);
                kfree(monitor);

        next:
                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
                        if (!list_empty(&op->to_do))
                                fscache_enqueue_retrieval(op);
                        _leave(" [maxed out]");
                        return;
                }

                spin_lock_irq(&object->work_lock);
        }

        spin_unlock_irq(&object->work_lock);
        _leave("");
}
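
/*
 * A note on the locking above: cachefiles_read_waiter() runs with the page
 * waitqueue lock held, possibly in interrupt context since the unlock may
 * come from the I/O completion path, so it takes work_lock with a plain
 * spin_lock and does nothing beyond queuing the monitor; the copier, running
 * in FS-Cache's thread pool, must use spin_lock_irq to exclude that context.
 * The "max" counter bounds how many pages one pass copies before the op
 * requeues itself, so the thread pool isn't monopolised.
 */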
/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
                                            struct fscache_retrieval *op,
                                            struct page *netpage,
                                            struct pagevec *pagevec)
{
        struct cachefiles_one_read *monitor;
        struct address_space *bmapping;
        struct page *newpage, *backpage;
        int ret;

        _enter("");

        pagevec_reinit(pagevec);

        _debug("read back %p{%lu,%d}",
               netpage, netpage->index, page_count(netpage));

        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
        if (!monitor)
                goto nomem;

        monitor->netfs_page = netpage;
        monitor->op = fscache_get_retrieval(op);

        init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

        /* attempt to get hold of the backing page */
        bmapping = object->backer->d_inode->i_mapping;
        newpage = NULL;

        for (;;) {
                backpage = find_get_page(bmapping, netpage->index);
                if (backpage)
                        goto backing_page_already_present;

                if (!newpage) {
                        newpage = __page_cache_alloc(cachefiles_gfp |
                                                     __GFP_COLD);
                        if (!newpage)
                                goto nomem_monitor;
                }

                ret = add_to_page_cache(newpage, bmapping,
                                        netpage->index, cachefiles_gfp);
                if (ret == 0)
                        goto installed_new_backing_page;
                if (ret != -EEXIST)
                        goto nomem_page;
        }

        /* we've installed a new backing page, so now we need to add it
         * to the LRU list and start it reading */
installed_new_backing_page:
        _debug("- new %p", newpage);

        backpage = newpage;
        newpage = NULL;

        page_cache_get(backpage);
        pagevec_add(pagevec, backpage);
        __pagevec_lru_add_file(pagevec);

read_backing_page:
        ret = bmapping->a_ops->readpage(NULL, backpage);
        if (ret < 0)
                goto read_error;

        /* set the monitor to transfer the data across */
monitor_backing_page:
        _debug("- monitor add");

        /* install the monitor */
        page_cache_get(monitor->netfs_page);
        page_cache_get(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
        monitor = NULL;

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }
        goto success;

        /* if the backing page is already present, it can be in one of
         * three states: read in progress, read failed or read okay */
backing_page_already_present:
        _debug("- present");

        if (newpage) {
                page_cache_release(newpage);
                newpage = NULL;
        }

        if (PageError(backpage))
                goto io_error;

        if (PageUptodate(backpage))
                goto backing_page_already_uptodate;

        if (!trylock_page(backpage))
                goto monitor_backing_page;
        _debug("read %p {%lx}", backpage, backpage->flags);
        goto read_backing_page;

        /* the backing page is already up to date, attach the netfs
         * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
        _debug("- uptodate");

        fscache_mark_page_cached(op, netpage);

        copy_highpage(netpage, backpage);
        fscache_end_io(op, netpage, 0);
        fscache_retrieval_complete(op, 1);

success:
        _debug("success");
        ret = 0;

out:
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
        }
        _leave(" = %d", ret);
        return ret;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto out;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        fscache_retrieval_complete(op, 1);
        ret = -ENOBUFS;
        goto out;

nomem_page:
        page_cache_release(newpage);
nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
nomem:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
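
/*
 * Rough state flow of the function above, for reference:
 *
 *      find_get_page()
 *        miss -> allocate + add_to_page_cache()
 *                  -> installed_new_backing_page -> read_backing_page
 *        hit  -> backing_page_already_present
 *                  PageError    -> io_error
 *                  PageUptodate -> backing_page_already_uptodate (copy now)
 *                  locked       -> monitor_backing_page (copy on unlock)
 *                  otherwise    -> lock it and goto read_backing_page
 *
 * Uncertain pages are abandoned to be retried later, per the header comment.
 */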
/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
        int ret;
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{%p},{%lx},,,", object, page->index);

        if (!object->backer)
                goto enobufs;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        pagevec_init(&pagevec, 0);

        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
         *   enough for this as it doesn't indicate errors, but it's all we've
         *   got for the moment
         */
        block0 = page->index;
        block0 <<= shift;

        block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
        _debug("%llx -> %llx",
               (unsigned long long) block0,
               (unsigned long long) block);

        if (block) {
                /* submit the apparently valid page to the backing fs to be
                 * read from disk */
                ret = cachefiles_read_backing_file_one(object, op, page,
                                                       &pagevec);
        } else if (cachefiles_has_space(cache, 0, 1) == 0) {
                /* there's space in the cache we can use */
                fscache_mark_page_cached(op, page);
                fscache_retrieval_complete(op, 1);
                ret = -ENODATA;
        } else {
                goto enobufs;
        }

        _leave(" = %d", ret);
        return ret;

enobufs:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
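
/*
 * Worked example of the bmap probe above (illustrative figures only): with
 * 4KiB pages (PAGE_SHIFT == 12) on a backing filesystem using 1KiB blocks
 * (s_blocksize_bits == 10), shift == 2, so netfs page index 5 is probed as
 * backing block 5 << 2 == 20.  A non-zero result means the backing file has
 * a block mapped there and the page can be read back; zero means a hole,
 * i.e. nothing cached for that page.
 */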
/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                        struct fscache_retrieval *op,
                                        struct list_head *list)
{
        struct cachefiles_one_read *monitor = NULL;
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct pagevec lru_pvec;
        struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
        int ret = 0;

        _enter("");

        pagevec_init(&lru_pvec, 0);

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);

                _debug("read back %p{%lu,%d}",
                       netpage, netpage->index, page_count(netpage));

                if (!monitor) {
                        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
                        if (!monitor)
                                goto nomem;

                        monitor->op = fscache_get_retrieval(op);
                        init_waitqueue_func_entry(&monitor->monitor,
                                                  cachefiles_read_waiter);
                }

                for (;;) {
                        backpage = find_get_page(bmapping, netpage->index);
                        if (backpage)
                                goto backing_page_already_present;

                        if (!newpage) {
                                newpage = __page_cache_alloc(cachefiles_gfp |
                                                             __GFP_COLD);
                                if (!newpage)
                                        goto nomem;
                        }

                        ret = add_to_page_cache(newpage, bmapping,
                                                netpage->index, cachefiles_gfp);
                        if (ret == 0)
                                goto installed_new_backing_page;
                        if (ret != -EEXIST)
                                goto nomem;
                }

                /* we've installed a new backing page, so now we need to add it
                 * to the LRU list and start it reading */
        installed_new_backing_page:
                _debug("- new %p", newpage);

                backpage = newpage;
                newpage = NULL;

                page_cache_get(backpage);
                if (!pagevec_add(&lru_pvec, backpage))
                        __pagevec_lru_add_file(&lru_pvec);

        reread_backing_page:
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto read_error;

                /* add the netfs page to the pagecache and LRU, and set the
                 * monitor to transfer the data across */
        monitor_backing_page:
                _debug("- monitor add");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                /* install a monitor */
                page_cache_get(netpage);
                monitor->netfs_page = netpage;

                page_cache_get(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
                monitor = NULL;

                /* but the page may have been read before the monitor was
                 * installed, so the monitor may miss the event - so we have to
                 * ensure that we do get one in such a case */
                if (trylock_page(backpage)) {
                        _debug("2unlock %p {%lx}", backpage, backpage->flags);
                        unlock_page(backpage);
                }

                page_cache_release(backpage);
                backpage = NULL;

                page_cache_release(netpage);
                netpage = NULL;
                continue;

                /* if the backing page is already present, it can be in one of
                 * three states: read in progress, read failed or read okay */
        backing_page_already_present:
                _debug("- present %p", backpage);

                if (PageError(backpage))
                        goto io_error;

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate;

                _debug("- not ready %p{%lx}", backpage, backpage->flags);

                if (!trylock_page(backpage))
                        goto monitor_backing_page;

                if (PageError(backpage)) {
                        _debug("error %lx", backpage->flags);
                        unlock_page(backpage);
                        goto io_error;
                }

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate_unlock;

                /* we've locked a page that's neither up to date nor erroneous,
                 * so we need to attempt to read it again */
                goto reread_backing_page;

                /* the backing page is already up to date, attach the netfs
                 * page to the pagecache and LRU and copy the data across */
        backing_page_already_uptodate_unlock:
                _debug("uptodate %lx", backpage->flags);
                unlock_page(backpage);
        backing_page_already_uptodate:
                _debug("- uptodate");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                copy_highpage(netpage, backpage);

                page_cache_release(backpage);
                backpage = NULL;

                fscache_mark_page_cached(op, netpage);

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                /* the netpage is unlocked and marked up to date here */
                fscache_end_io(op, netpage, 0);
                fscache_retrieval_complete(op, 1);
                page_cache_release(netpage);
                netpage = NULL;
                continue;
        }

        netpage = NULL;

        _debug("out");

out:
        /* tidy up */
        pagevec_lru_add_file(&lru_pvec);

        if (newpage)
                page_cache_release(newpage);
        if (netpage)
                page_cache_release(netpage);
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
        }

        /* discard anything left on the list */
        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
                page_cache_release(netpage);
                fscache_retrieval_complete(op, 1);
        }

        _leave(" = %d", ret);
        return ret;

nomem:
        _debug("nomem");
        ret = -ENOMEM;
        goto out;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto out;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        fscache_retrieval_complete(op, 1);
        ret = -ENOBUFS;
        goto out;
}
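
/*
 * Note the difference from cachefiles_read_backing_file_one() above: here
 * the netfs pages arrive on a list and are not yet in the netfs's pagecache,
 * so each one must be installed with add_to_page_cache() before completion;
 * -EEXIST there means another thread beat us to that page, in which case it
 * is simply dropped and the retrieval marked complete for it.
 */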
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                                   struct list_head *pages,
                                   unsigned *nr_pages,
                                   gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct list_head backpages;
        struct pagevec pagevec;
        struct inode *inode;
        struct page *page, *_n;
        unsigned shift, nrbackpages;
        int ret, ret2, space;
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{OBJ%x,%d},,%d,,",
               object->fscache.debug_id, atomic_read(&op->op.usage),
               *nr_pages);

        if (!object->backer)
                goto all_enobufs;

        space = 1;
        if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
                space = 0;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto all_enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        pagevec_init(&pagevec, 0);

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        INIT_LIST_HEAD(&backpages);
        nrbackpages = 0;

        ret = space ? -ENODATA : -ENOBUFS;
        list_for_each_entry_safe(page, _n, pages, lru) {
                sector_t block0, block;

                /* we assume the absence or presence of the first block is a
                 * good enough indication for the page as a whole
                 * - TODO: don't use bmap() for this as it is _not_ actually
                 *   good enough for this as it doesn't indicate errors, but
                 *   it's all we've got for the moment
                 */
                block0 = page->index;
                block0 <<= shift;

                block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
                                                      block0);
                _debug("%llx -> %llx",
                       (unsigned long long) block0,
                       (unsigned long long) block);

                if (block) {
                        /* we have data - add it to the list to give to the
                         * backing fs */
                        list_move(&page->lru, &backpages);
                        (*nr_pages)--;
                        nrbackpages++;
                } else if (space && pagevec_add(&pagevec, page) == 0) {
                        fscache_mark_pages_cached(op, &pagevec);
                        fscache_retrieval_complete(op, 1);
                        ret = -ENODATA;
                } else {
                        fscache_retrieval_complete(op, 1);
                }
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        if (list_empty(pages))
                ret = 0;

        /* submit the apparently valid pages to the backing fs to be read from
         * disk */
        if (nrbackpages > 0) {
                ret2 = cachefiles_read_backing_file(object, op, &backpages);
                if (ret2 == -ENOMEM || ret2 == -EINTR)
                        ret = ret2;
        }

        _leave(" = %d [nr=%u%s]",
               ret, *nr_pages, list_empty(pages) ? " empty" : "");
        return ret;

all_enobufs:
        fscache_retrieval_complete(op, *nr_pages);
        return -ENOBUFS;
}
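
/*
 * Summary of the result convention above: ret starts as -ENODATA (space
 * available) or -ENOBUFS (cache full), drops to 0 once every page has been
 * dispatched to the backing file, and is only overridden by a fatal -ENOMEM
 * or -EINTR from cachefiles_read_backing_file(); pages neither readable nor
 * markable are completed individually and left for the netfs to fetch.
 */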
/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
                             struct page *page,
                             gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lx},", object, page->index);

        ret = cachefiles_has_space(cache, 0, 1);
        if (ret == 0)
                fscache_mark_page_cached(op, page);
        else
                ret = -ENOBUFS;

        fscache_retrieval_complete(op, 1);
        _leave(" = %d", ret);
        return ret;
}
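
/*
 * Note that nothing is physically reserved above: cachefiles_has_space()
 * just checks the cache against its configured limits, and the page is
 * reported to FS-Cache as cacheable; real space is only consumed later,
 * when cachefiles_write_page() writes into the backing file.
 */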
/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
                              struct list_head *pages,
                              unsigned *nr_pages,
                              gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct page *page;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,,,%d,", object, *nr_pages);

        ret = cachefiles_has_space(cache, 0, *nr_pages);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);

                list_for_each_entry(page, pages, lru) {
                        if (pagevec_add(&pagevec, page) == 0)
                                fscache_mark_pages_cached(op, &pagevec);
                }

                if (pagevec_count(&pagevec) > 0)
                        fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        fscache_retrieval_complete(op, *nr_pages);
        _leave(" = %d", ret);
        return ret;
}
/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
        struct path path;
        loff_t pos, eof;
        size_t len;
        void *data;
        int ret;

        ASSERT(op != NULL);
        ASSERT(page != NULL);

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("%p,%p{%lx},,,", object, page, page->index);

        if (!object->backer) {
                /* no backing file to write to */
                _leave(" = -ENOBUFS");
                return -ENOBUFS;
        }

        ASSERT(S_ISREG(object->backer->d_inode->i_mode));

        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        /* write the page to the backing filesystem and let it store it in its
         * own time */
        path.mnt = cache->mnt;
        path.dentry = object->backer;
        file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
        } else {
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;

                        /* we mustn't write more data than we have, so we have
                         * to beware of a partial page at EOF */
                        eof = object->fscache.store_limit_l;
                        len = PAGE_SIZE;
                        if (eof & ~PAGE_MASK) {
                                ASSERTCMP(pos, <, eof);
                                if (eof - pos < PAGE_SIZE) {
                                        _debug("cut short %llx to %llx",
                                               pos, eof);
                                        len = eof - pos;
                                        ASSERTCMP(pos + len, ==, eof);
                                }
                        }

                        data = kmap(page);
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
                                file, (const void __user *) data, len, &pos);
                        set_fs(old_fs);
                        kunmap(page);
                        if (ret != len)
                                ret = -EIO;
                }
                fput(file);
        }

        if (ret < 0) {
                if (ret == -EIO)
                        cachefiles_io_error_obj(
                                object, "Write page to backing file failed");
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}
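
/*
 * Worked example of the EOF clamp above (illustrative figures only): with a
 * store limit of 0x2600 bytes and page index 2 on a 4KiB-page machine,
 * pos == 2 << PAGE_SHIFT == 0x2000 and eof - pos == 0x600 < PAGE_SIZE, so
 * only 0x600 bytes are written rather than a whole page of stale tail data,
 * and pos + len lands exactly on the store limit.
 */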
/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;

        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lu}", object, page->index);

        spin_unlock(&object->fscache.cookie->lock);
}
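
/*
 * The bare spin_unlock above is intentional but subtle: the caller is
 * expected to enter this operation holding the cookie lock, and cachefiles,
 * which keeps no per-page state of its own to detach, simply releases it.
 * This assumes the FS-Cache calling convention for ->uncache_page(); check
 * the netfs API documentation before relying on it.
 */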