/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
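
/*
 * Note (added commentary, not from the original file): a netfs normally
 * reaches the two helpers above through the wrappers in linux/fscache.h
 * (fscache_check_page_write() and fscache_wait_on_page_write()), typically
 * from its truncation/invalidation paths, so that it does not free or reuse
 * a page whilst the cache is still writing it out.
 */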
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the slow-work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
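
/*
 * Illustrative sketch (not part of this file): __fscache_maybe_release_page()
 * is reached via the fscache_maybe_release_page() wrapper, which a netfs
 * would typically call from its ->releasepage() implementation; the names
 * example_releasepage and example_get_cookie below are hypothetical:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = example_get_cookie(page);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;	(the cache is still using the page)
 *		return 1;		(the page may be released)
 *	}
 */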
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_set_op_state(op, "CallFS");
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_set_op_state(op, "Done");
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, NULL);
	fscache_operation_init_slow(op, fscache_attr_changed_op);
	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
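
/*
 * Note (added commentary): __fscache_attr_changed() is driven through the
 * fscache_attr_changed() wrapper, which a netfs would typically call after a
 * local or server-side change of file attributes (a truncate, for instance)
 * so that the cache backend can resize or adjust its backing store to match.
 */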
/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
	struct fscache_retrieval *op =
		container_of(work, struct fscache_retrieval, op.fast_work);
	unsigned long start;

	_enter("{OP%x}", op->op.debug_id);

	start = jiffies;
	op->op.processor(&op->op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(&op->op);
}
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
}
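
/*
 * Note (added commentary): FSCACHE_OP_MYTHREAD means a retrieval op is not
 * handed off to a worker pool; instead the thread that issued the read waits
 * for the op to be granted (FSCACHE_OP_WAITING cleared) and then drives the
 * cache backend itself - see fscache_wait_for_retrieval_activation() below.
 */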
/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}
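
/*
 * Note (added commentary): the wait above is interruptible; if the caller is
 * signalled we first try fscache_cancel_op(), and only if the op has already
 * left the pending queue do we fall back to an uninterruptible wait for it to
 * be granted.  -ERESTARTSYS is returned for a cancelled op, -ENOBUFS if the
 * backing object has died in the meantime.
 */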
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	fscache_set_op_name(&op->op, "RetrRA1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
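
/*
 * Illustrative sketch (not part of this file): a netfs ->readpage()
 * implementation would typically call the fscache_read_or_alloc_page()
 * wrapper and fall back to a server read when the cache has nothing to
 * offer; names other than the fscache_* calls are hypothetical:
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 example_read_done, NULL, GFP_KERNEL);
 *	switch (ret) {
 *	case 0:		(read dispatched; example_read_done() will be called)
 *		return 0;
 *	case -ENODATA:
 *	case -ENOBUFS:	(nothing usable in the cache; read from the server)
 *		return example_read_from_server(page);
 *	default:
 *		return ret;
 *	}
 */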
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrRAN");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
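
/*
 * Note (added commentary): on return from a multi-page retrieval, any page
 * for which a read was dispatched has been removed from the caller's list and
 * subtracted from *nr_pages; the pages still on the list are the ones the
 * netfs must fetch from the server itself.
 */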
/*
 * allocate a block in the cache in which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrAL1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
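
/*
 * Note (added commentary): __fscache_alloc_page() only reserves space in the
 * cache; no data is read.  A netfs would typically use the
 * fscache_alloc_page() wrapper when it intends to obtain the data elsewhere
 * and then push it into the cache with fscache_write_page().
 */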
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	fscache_set_op_state(&op->op, "GetPage");

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_set_op_state(&op->op, "Store");
	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_set_op_state(&op->op, "EndWrite");
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_set_op_state(&op->op, "Abort");
		fscache_abort_object(object);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}
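
/*
 * Note (added commentary): fscache_write_op() stores at most one page per
 * invocation.  If more pages remain tagged pending, the op re-enqueues itself
 * via fscache_enqueue_operation() above, so a single writer op drains the
 * cookie's pending-store tree page by page rather than holding the locks
 * across the whole set.
 */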
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the slow work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
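
/*
 * Illustrative sketch (not part of this file): a netfs would typically call
 * the fscache_write_page() wrapper once a page has been filled with data from
 * the server, for example in its read-completion path, and uncache the page
 * if the write could not be queued so that PG_fscache does not remain set:
 *
 *	if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *		fscache_uncache_page(cookie, page);
 */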
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
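
/*
 * Note (added commentary): uncaching clears PG_fscache, so the page ceases to
 * be tracked by the cache.  The netfs is expected to do this for every page
 * it previously marked (or had marked for it via fscache_mark_pages_cached())
 * before the page is finally released.
 */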
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
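
/*
 * Note (added commentary): fscache_mark_pages_cached() is intended to be
 * called by cache backends from their retrieval paths; the netfs then sees
 * PG_fscache set on those pages and must later unmark each one with
 * fscache_uncache_page().
 */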