fs/fscache/page.c
1 /* Cache page management and data I/O routines
2  *
3  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11
12 #define FSCACHE_DEBUG_LEVEL PAGE
13 #include <linux/module.h>
14 #include <linux/fscache-cache.h>
15 #include <linux/buffer_head.h>
16 #include <linux/pagevec.h>
17 #include <linux/slab.h>
18 #include "internal.h"
19
20 /*
21  * check to see if a page is being written to the cache
22  */
23 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
24 {
25         void *val;
26
27         rcu_read_lock();
28         val = radix_tree_lookup(&cookie->stores, page->index);
29         rcu_read_unlock();
30
31         return val != NULL;
32 }
33 EXPORT_SYMBOL(__fscache_check_page_write);
34
35 /*
36  * wait for a page to finish being written to the cache
37  */
38 void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
39 {
40         wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
41
42         wait_event(*wq, !__fscache_check_page_write(cookie, page));
43 }
44 EXPORT_SYMBOL(__fscache_wait_on_page_write);
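/*
 * Illustrative sketch (not taken from any real netfs; the example_* names are
 * hypothetical): a filesystem normally reaches the two routines above through
 * the fscache_check_page_write() and fscache_wait_on_page_write() wrappers in
 * linux/fscache.h, typically to keep the VM from reusing a page that the
 * cache is still writing out.
 */
#if 0 /* example only */
static int example_launder_page(struct fscache_cookie *cookie,
				struct page *page)
{
	/* fscache_check_page_write() is the non-blocking probe; here we block
	 * until any store of this page to the cache has completed */
	if (PageFsCache(page))
		fscache_wait_on_page_write(cookie, page);
	return 0;
}
#endif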
45
46 /*
47  * decide whether a page can be released, possibly by cancelling a store to it
48  * - we're allowed to sleep if __GFP_WAIT is flagged
49  */
50 bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
51                                   struct page *page,
52                                   gfp_t gfp)
53 {
54         struct page *xpage;
55         void *val;
56
57         _enter("%p,%p,%x", cookie, page, gfp);
58
59 try_again:
60         rcu_read_lock();
61         val = radix_tree_lookup(&cookie->stores, page->index);
62         if (!val) {
63                 rcu_read_unlock();
64                 fscache_stat(&fscache_n_store_vmscan_not_storing);
65                 __fscache_uncache_page(cookie, page);
66                 return true;
67         }
68
69         /* see if the page is actually undergoing storage - if so we can't get
70          * rid of it till the cache has finished with it */
71         if (radix_tree_tag_get(&cookie->stores, page->index,
72                                FSCACHE_COOKIE_STORING_TAG)) {
73                 rcu_read_unlock();
74                 goto page_busy;
75         }
76
77         /* the page is pending storage, so we attempt to cancel the store and
78          * discard the store request so that the page can be reclaimed */
79         spin_lock(&cookie->stores_lock);
80         rcu_read_unlock();
81
82         if (radix_tree_tag_get(&cookie->stores, page->index,
83                                FSCACHE_COOKIE_STORING_TAG)) {
84                 /* the page started to undergo storage whilst we were looking,
85                  * so now we can only wait or return */
86                 spin_unlock(&cookie->stores_lock);
87                 goto page_busy;
88         }
89
90         xpage = radix_tree_delete(&cookie->stores, page->index);
91         spin_unlock(&cookie->stores_lock);
92
93         if (xpage) {
94                 fscache_stat(&fscache_n_store_vmscan_cancelled);
95                 fscache_stat(&fscache_n_store_radix_deletes);
96                 ASSERTCMP(xpage, ==, page);
97         } else {
98                 fscache_stat(&fscache_n_store_vmscan_gone);
99         }
100
101         wake_up_bit(&cookie->flags, 0);
102         if (xpage)
103                 page_cache_release(xpage);
104         __fscache_uncache_page(cookie, page);
105         return true;
106
107 page_busy:
108         /* We will wait here if we're allowed to, but that could deadlock the
109          * allocator as the work threads writing to the cache may all end up
110          * sleeping on memory allocation, so we may need to impose a timeout
111          * too. */
112         if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
113                 fscache_stat(&fscache_n_store_vmscan_busy);
114                 return false;
115         }
116
117         fscache_stat(&fscache_n_store_vmscan_wait);
118         __fscache_wait_on_page_write(cookie, page);
119         gfp &= ~__GFP_WAIT;
120         goto try_again;
121 }
122 EXPORT_SYMBOL(__fscache_maybe_release_page);
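/*
 * Illustrative sketch (hypothetical example_* names): a netfs ->releasepage()
 * implementation would typically consult the fscache_maybe_release_page()
 * wrapper before letting the VM take a page that may still be queued for
 * storage.
 */
#if 0 /* example only */
static int example_releasepage(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = example_page_to_cookie(page); /* hypothetical lookup */

	if (PageFsCache(page) &&
	    !fscache_maybe_release_page(cookie, page, gfp))
		return 0;	/* the cache hasn't finished with the page */

	return 1;		/* the VM may release the page */
}
#endif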
123
124 /*
125  * note that a page has finished being written to the cache
126  */
127 static void fscache_end_page_write(struct fscache_object *object,
128                                    struct page *page)
129 {
130         struct fscache_cookie *cookie;
131         struct page *xpage = NULL;
132
133         spin_lock(&object->lock);
134         cookie = object->cookie;
135         if (cookie) {
136                 /* delete the page from the tree if it is now no longer
137                  * pending */
138                 spin_lock(&cookie->stores_lock);
139                 radix_tree_tag_clear(&cookie->stores, page->index,
140                                      FSCACHE_COOKIE_STORING_TAG);
141                 if (!radix_tree_tag_get(&cookie->stores, page->index,
142                                         FSCACHE_COOKIE_PENDING_TAG)) {
143                         fscache_stat(&fscache_n_store_radix_deletes);
144                         xpage = radix_tree_delete(&cookie->stores, page->index);
145                 }
146                 spin_unlock(&cookie->stores_lock);
147                 wake_up_bit(&cookie->flags, 0);
148         }
149         spin_unlock(&object->lock);
150         if (xpage)
151                 page_cache_release(xpage);
152 }
153
154 /*
155  * actually apply the changed attributes to a cache object
156  */
157 static void fscache_attr_changed_op(struct fscache_operation *op)
158 {
159         struct fscache_object *object = op->object;
160         int ret;
161
162         _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
163
164         fscache_stat(&fscache_n_attr_changed_calls);
165
166         if (fscache_object_is_active(object)) {
167                 fscache_stat(&fscache_n_cop_attr_changed);
168                 ret = object->cache->ops->attr_changed(object);
169                 fscache_stat_d(&fscache_n_cop_attr_changed);
170                 if (ret < 0)
171                         fscache_abort_object(object);
172         }
173
174         fscache_op_complete(op, true);
175         _leave("");
176 }
177
178 /*
179  * notification that the attributes on an object have changed
180  */
181 int __fscache_attr_changed(struct fscache_cookie *cookie)
182 {
183         struct fscache_operation *op;
184         struct fscache_object *object;
185         bool wake_cookie;
186
187         _enter("%p", cookie);
188
189         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
190
191         fscache_stat(&fscache_n_attr_changed);
192
193         op = kzalloc(sizeof(*op), GFP_KERNEL);
194         if (!op) {
195                 fscache_stat(&fscache_n_attr_changed_nomem);
196                 _leave(" = -ENOMEM");
197                 return -ENOMEM;
198         }
199
200         fscache_operation_init(op, fscache_attr_changed_op, NULL);
201         op->flags = FSCACHE_OP_ASYNC |
202                 (1 << FSCACHE_OP_EXCLUSIVE) |
203                 (1 << FSCACHE_OP_UNUSE_COOKIE);
204
205         spin_lock(&cookie->lock);
206
207         if (!fscache_cookie_enabled(cookie) ||
208             hlist_empty(&cookie->backing_objects))
209                 goto nobufs;
210         object = hlist_entry(cookie->backing_objects.first,
211                              struct fscache_object, cookie_link);
212
213         __fscache_use_cookie(cookie);
214         if (fscache_submit_exclusive_op(object, op) < 0)
215                 goto nobufs;
216         spin_unlock(&cookie->lock);
217         fscache_stat(&fscache_n_attr_changed_ok);
218         fscache_put_operation(op);
219         _leave(" = 0");
220         return 0;
221
222 nobufs:
223         wake_cookie = __fscache_unuse_cookie(cookie);
224         spin_unlock(&cookie->lock);
225         kfree(op);
226         if (wake_cookie)
227                 __fscache_wake_unused_cookie(cookie);
228         fscache_stat(&fscache_n_attr_changed_nobufs);
229         _leave(" = %d", -ENOBUFS);
230         return -ENOBUFS;
231 }
232 EXPORT_SYMBOL(__fscache_attr_changed);
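/*
 * Illustrative sketch (hypothetical example_* names): a netfs calls the
 * fscache_attr_changed() wrapper when an object's attributes - notably its
 * size - change, so that the cache can resize or shrink the backing object.
 * A truncate path might look roughly like this.
 */
#if 0 /* example only */
static int example_truncate(struct fscache_cookie *cookie, struct inode *inode,
			    loff_t newsize)
{
	truncate_setsize(inode, newsize);

	/* note the new size to the cache; -ENOBUFS just means "not cached" */
	return fscache_attr_changed(cookie);
}
#endif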
233
234 /*
235  * release a retrieval op reference
236  */
237 static void fscache_release_retrieval_op(struct fscache_operation *_op)
238 {
239         struct fscache_retrieval *op =
240                 container_of(_op, struct fscache_retrieval, op);
241
242         _enter("{OP%x}", op->op.debug_id);
243
244         ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
245
246         fscache_hist(fscache_retrieval_histogram, op->start_time);
247         if (op->context)
248                 fscache_put_context(op->op.object->cookie, op->context);
249
250         _leave("");
251 }
252
253 /*
254  * allocate a retrieval op
255  */
256 static struct fscache_retrieval *fscache_alloc_retrieval(
257         struct fscache_cookie *cookie,
258         struct address_space *mapping,
259         fscache_rw_complete_t end_io_func,
260         void *context)
261 {
262         struct fscache_retrieval *op;
263
264         /* allocate a retrieval operation and attempt to submit it */
265         op = kzalloc(sizeof(*op), GFP_NOIO);
266         if (!op) {
267                 fscache_stat(&fscache_n_retrievals_nomem);
268                 return NULL;
269         }
270
271         fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
272         op->op.flags    = FSCACHE_OP_MYTHREAD |
273                 (1UL << FSCACHE_OP_WAITING) |
274                 (1UL << FSCACHE_OP_UNUSE_COOKIE);
275         op->mapping     = mapping;
276         op->end_io_func = end_io_func;
277         op->context     = context;
278         op->start_time  = jiffies;
279         INIT_LIST_HEAD(&op->to_do);
280         return op;
281 }
282
283 /*
284  * wait for a deferred lookup to complete
285  */
286 int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
287 {
288         unsigned long jif;
289
290         _enter("");
291
292         if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
293                 _leave(" = 0 [imm]");
294                 return 0;
295         }
296
297         fscache_stat(&fscache_n_retrievals_wait);
298
299         jif = jiffies;
300         if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
301                         TASK_INTERRUPTIBLE) != 0) {
302                 fscache_stat(&fscache_n_retrievals_intr);
303                 _leave(" = -ERESTARTSYS");
304                 return -ERESTARTSYS;
305         }
306
307         ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
308
309         smp_rmb();
310         fscache_hist(fscache_retrieval_delay_histogram, jif);
311         _leave(" = 0 [dly]");
312         return 0;
313 }
314
315 /*
316  * Handle cancellation of a pending retrieval op
317  */
318 static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
319 {
320         struct fscache_retrieval *op =
321                 container_of(_op, struct fscache_retrieval, op);
322
323         atomic_set(&op->n_pages, 0);
324 }
325
326 /*
327  * wait for an operation to become active (or for the object to die)
328  */
329 int fscache_wait_for_operation_activation(struct fscache_object *object,
330                                           struct fscache_operation *op,
331                                           atomic_t *stat_op_waits,
332                                           atomic_t *stat_object_dead,
333                                           void (*do_cancel)(struct fscache_operation *))
334 {
335         int ret;
336
337         if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
338                 goto check_if_dead;
339
340         _debug(">>> WT");
341         if (stat_op_waits)
342                 fscache_stat(stat_op_waits);
343         if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
344                         TASK_INTERRUPTIBLE) != 0) {
345                 ret = fscache_cancel_op(op, do_cancel);
346                 if (ret == 0)
347                         return -ERESTARTSYS;
348
349                 /* it's been removed from the pending queue by another party,
350                  * so we should get to run shortly */
351                 wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
352                             TASK_UNINTERRUPTIBLE);
353         }
354         _debug("<<< GO");
355
356 check_if_dead:
357         if (op->state == FSCACHE_OP_ST_CANCELLED) {
358                 if (stat_object_dead)
359                         fscache_stat(stat_object_dead);
360                 _leave(" = -ENOBUFS [cancelled]");
361                 return -ENOBUFS;
362         }
363         if (unlikely(fscache_object_is_dead(object))) {
364                 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
365                 fscache_cancel_op(op, do_cancel);
366                 if (stat_object_dead)
367                         fscache_stat(stat_object_dead);
368                 return -ENOBUFS;
369         }
370         return 0;
371 }
372
373 /*
374  * read a page from the cache or allocate a block in which to store it
375  * - we return:
376  *   -ENOMEM    - out of memory, nothing done
377  *   -ERESTARTSYS - interrupted
378  *   -ENOBUFS   - no backing object available in which to cache the block
379  *   -ENODATA   - no data available in the backing object for this block
380  *   0          - dispatched a read - it'll call end_io_func() when finished
381  */
382 int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
383                                  struct page *page,
384                                  fscache_rw_complete_t end_io_func,
385                                  void *context,
386                                  gfp_t gfp)
387 {
388         struct fscache_retrieval *op;
389         struct fscache_object *object;
390         bool wake_cookie = false;
391         int ret;
392
393         _enter("%p,%p,,,", cookie, page);
394
395         fscache_stat(&fscache_n_retrievals);
396
397         if (hlist_empty(&cookie->backing_objects))
398                 goto nobufs;
399
400         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
401                 _leave(" = -ENOBUFS [invalidating]");
402                 return -ENOBUFS;
403         }
404
405         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
406         ASSERTCMP(page, !=, NULL);
407
408         if (fscache_wait_for_deferred_lookup(cookie) < 0)
409                 return -ERESTARTSYS;
410
411         op = fscache_alloc_retrieval(cookie, page->mapping,
412                                      end_io_func, context);
413         if (!op) {
414                 _leave(" = -ENOMEM");
415                 return -ENOMEM;
416         }
417         atomic_set(&op->n_pages, 1);
418
419         spin_lock(&cookie->lock);
420
421         if (!fscache_cookie_enabled(cookie) ||
422             hlist_empty(&cookie->backing_objects))
423                 goto nobufs_unlock;
424         object = hlist_entry(cookie->backing_objects.first,
425                              struct fscache_object, cookie_link);
426
427         ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
428
429         __fscache_use_cookie(cookie);
430         atomic_inc(&object->n_reads);
431         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
432
433         if (fscache_submit_op(object, &op->op) < 0)
434                 goto nobufs_unlock_dec;
435         spin_unlock(&cookie->lock);
436
437         fscache_stat(&fscache_n_retrieval_ops);
438
439         /* pin the netfs read context in case we need to do the actual netfs
440          * read because we've encountered a cache read failure */
441         fscache_get_context(object->cookie, op->context);
442
443         /* we wait for the operation to become active, and then process it
444          * *here*, in this thread, and not in the thread pool */
445         ret = fscache_wait_for_operation_activation(
446                 object, &op->op,
447                 __fscache_stat(&fscache_n_retrieval_op_waits),
448                 __fscache_stat(&fscache_n_retrievals_object_dead),
449                 fscache_do_cancel_retrieval);
450         if (ret < 0)
451                 goto error;
452
453         /* ask the cache to honour the operation */
454         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
455                 fscache_stat(&fscache_n_cop_allocate_page);
456                 ret = object->cache->ops->allocate_page(op, page, gfp);
457                 fscache_stat_d(&fscache_n_cop_allocate_page);
458                 if (ret == 0)
459                         ret = -ENODATA;
460         } else {
461                 fscache_stat(&fscache_n_cop_read_or_alloc_page);
462                 ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
463                 fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
464         }
465
466 error:
467         if (ret == -ENOMEM)
468                 fscache_stat(&fscache_n_retrievals_nomem);
469         else if (ret == -ERESTARTSYS)
470                 fscache_stat(&fscache_n_retrievals_intr);
471         else if (ret == -ENODATA)
472                 fscache_stat(&fscache_n_retrievals_nodata);
473         else if (ret < 0)
474                 fscache_stat(&fscache_n_retrievals_nobufs);
475         else
476                 fscache_stat(&fscache_n_retrievals_ok);
477
478         fscache_put_retrieval(op);
479         _leave(" = %d", ret);
480         return ret;
481
482 nobufs_unlock_dec:
483         atomic_dec(&object->n_reads);
484         wake_cookie = __fscache_unuse_cookie(cookie);
485 nobufs_unlock:
486         spin_unlock(&cookie->lock);
487         if (wake_cookie)
488                 __fscache_wake_unused_cookie(cookie);
489         kfree(op);
490 nobufs:
491         fscache_stat(&fscache_n_retrievals_nobufs);
492         _leave(" = -ENOBUFS");
493         return -ENOBUFS;
494 }
495 EXPORT_SYMBOL(__fscache_read_or_alloc_page);
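/*
 * Illustrative sketch (hypothetical example_* names): a netfs ->readpage()
 * implementation would call the fscache_read_or_alloc_page() wrapper and fall
 * back to reading from the server on anything other than a dispatched read.
 * The completion routine must match fscache_rw_complete_t.
 */
#if 0 /* example only */
static void example_read_complete(struct page *page, void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int example_readpage_from_cache(struct fscache_cookie *cookie,
				       struct page *page)
{
	int ret = fscache_read_or_alloc_page(cookie, page,
					     example_read_complete,
					     NULL /* context */,
					     GFP_KERNEL);
	switch (ret) {
	case 0:			/* read dispatched; completion will unlock */
		return 0;
	case -ENODATA:		/* block allocated, but empty */
	case -ENOBUFS:		/* no cache available for this page */
	default:
		return ret;	/* caller reads from the server instead */
	}
}
#endif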
496
497 /*
498  * read a list of pages from the cache or allocate blocks in which to store
499  * them
500  * - we return:
501  *   -ENOMEM    - out of memory, some pages may be being read
502  *   -ERESTARTSYS - interrupted, some pages may be being read
503  *   -ENOBUFS   - no backing object or space available in which to cache any
504  *                pages not being read
505  *   -ENODATA   - no data available in the backing object for some or all of
506  *                the pages
507  *   0          - dispatched a read on all pages
508  *
509  * end_io_func() will be called for each page read from the cache as it
510  * finishes being read
511  *
512  * any pages for which a read is dispatched will be removed from pages and
513  * nr_pages
514  */
515 int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
516                                   struct address_space *mapping,
517                                   struct list_head *pages,
518                                   unsigned *nr_pages,
519                                   fscache_rw_complete_t end_io_func,
520                                   void *context,
521                                   gfp_t gfp)
522 {
523         struct fscache_retrieval *op;
524         struct fscache_object *object;
525         bool wake_cookie = false;
526         int ret;
527
528         _enter("%p,,%d,,,", cookie, *nr_pages);
529
530         fscache_stat(&fscache_n_retrievals);
531
532         if (hlist_empty(&cookie->backing_objects))
533                 goto nobufs;
534
535         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
536                 _leave(" = -ENOBUFS [invalidating]");
537                 return -ENOBUFS;
538         }
539
540         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
541         ASSERTCMP(*nr_pages, >, 0);
542         ASSERT(!list_empty(pages));
543
544         if (fscache_wait_for_deferred_lookup(cookie) < 0)
545                 return -ERESTARTSYS;
546
547         op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
548         if (!op)
549                 return -ENOMEM;
550         atomic_set(&op->n_pages, *nr_pages);
551
552         spin_lock(&cookie->lock);
553
554         if (!fscache_cookie_enabled(cookie) ||
555             hlist_empty(&cookie->backing_objects))
556                 goto nobufs_unlock;
557         object = hlist_entry(cookie->backing_objects.first,
558                              struct fscache_object, cookie_link);
559
560         __fscache_use_cookie(cookie);
561         atomic_inc(&object->n_reads);
562         __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
563
564         if (fscache_submit_op(object, &op->op) < 0)
565                 goto nobufs_unlock_dec;
566         spin_unlock(&cookie->lock);
567
568         fscache_stat(&fscache_n_retrieval_ops);
569
570         /* pin the netfs read context in case we need to do the actual netfs
571          * read because we've encountered a cache read failure */
572         fscache_get_context(object->cookie, op->context);
573
574         /* we wait for the operation to become active, and then process it
575          * *here*, in this thread, and not in the thread pool */
576         ret = fscache_wait_for_operation_activation(
577                 object, &op->op,
578                 __fscache_stat(&fscache_n_retrieval_op_waits),
579                 __fscache_stat(&fscache_n_retrievals_object_dead),
580                 fscache_do_cancel_retrieval);
581         if (ret < 0)
582                 goto error;
583
584         /* ask the cache to honour the operation */
585         if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
586                 fscache_stat(&fscache_n_cop_allocate_pages);
587                 ret = object->cache->ops->allocate_pages(
588                         op, pages, nr_pages, gfp);
589                 fscache_stat_d(&fscache_n_cop_allocate_pages);
590         } else {
591                 fscache_stat(&fscache_n_cop_read_or_alloc_pages);
592                 ret = object->cache->ops->read_or_alloc_pages(
593                         op, pages, nr_pages, gfp);
594                 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
595         }
596
597 error:
598         if (ret == -ENOMEM)
599                 fscache_stat(&fscache_n_retrievals_nomem);
600         else if (ret == -ERESTARTSYS)
601                 fscache_stat(&fscache_n_retrievals_intr);
602         else if (ret == -ENODATA)
603                 fscache_stat(&fscache_n_retrievals_nodata);
604         else if (ret < 0)
605                 fscache_stat(&fscache_n_retrievals_nobufs);
606         else
607                 fscache_stat(&fscache_n_retrievals_ok);
608
609         fscache_put_retrieval(op);
610         _leave(" = %d", ret);
611         return ret;
612
613 nobufs_unlock_dec:
614         atomic_dec(&object->n_reads);
615         wake_cookie = __fscache_unuse_cookie(cookie);
616 nobufs_unlock:
617         spin_unlock(&cookie->lock);
618         kfree(op);
619         if (wake_cookie)
620                 __fscache_wake_unused_cookie(cookie);
621 nobufs:
622         fscache_stat(&fscache_n_retrievals_nobufs);
623         _leave(" = -ENOBUFS");
624         return -ENOBUFS;
625 }
626 EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
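/*
 * Illustrative sketch (hypothetical example_* names): the ->readpages() side
 * of the previous example.  Pages that the cache takes on are removed from
 * the list and *nr_pages is reduced; whatever remains must be fetched from
 * the server (example_read_complete() is as sketched above).
 */
#if 0 /* example only */
static int example_readpages_from_cache(struct fscache_cookie *cookie,
					struct address_space *mapping,
					struct list_head *pages,
					unsigned *nr_pages)
{
	int ret = fscache_read_or_alloc_pages(cookie, mapping, pages, nr_pages,
					      example_read_complete,
					      NULL /* context */,
					      mapping_gfp_mask(mapping));
	switch (ret) {
	case 0:			/* every page was dispatched to the cache */
		return 0;
	case -ENOBUFS:		/* some or all pages were not cacheable */
	case -ENODATA:
	default:
		return ret;	/* read the remaining pages from the server */
	}
}
#endif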
627
628 /*
629  * allocate a block in the cache on which to store a page
630  * - we return:
631  *   -ENOMEM    - out of memory, nothing done
632  *   -ERESTARTSYS - interrupted
633  *   -ENOBUFS   - no backing object available in which to cache the block
634  *   0          - block allocated
635  */
636 int __fscache_alloc_page(struct fscache_cookie *cookie,
637                          struct page *page,
638                          gfp_t gfp)
639 {
640         struct fscache_retrieval *op;
641         struct fscache_object *object;
642         bool wake_cookie = false;
643         int ret;
644
645         _enter("%p,%p,,,", cookie, page);
646
647         fscache_stat(&fscache_n_allocs);
648
649         if (hlist_empty(&cookie->backing_objects))
650                 goto nobufs;
651
652         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
653         ASSERTCMP(page, !=, NULL);
654
655         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
656                 _leave(" = -ENOBUFS [invalidating]");
657                 return -ENOBUFS;
658         }
659
660         if (fscache_wait_for_deferred_lookup(cookie) < 0)
661                 return -ERESTARTSYS;
662
663         op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
664         if (!op)
665                 return -ENOMEM;
666         atomic_set(&op->n_pages, 1);
667
668         spin_lock(&cookie->lock);
669
670         if (!fscache_cookie_enabled(cookie) ||
671             hlist_empty(&cookie->backing_objects))
672                 goto nobufs_unlock;
673         object = hlist_entry(cookie->backing_objects.first,
674                              struct fscache_object, cookie_link);
675
676         __fscache_use_cookie(cookie);
677         if (fscache_submit_op(object, &op->op) < 0)
678                 goto nobufs_unlock_dec;
679         spin_unlock(&cookie->lock);
680
681         fscache_stat(&fscache_n_alloc_ops);
682
683         ret = fscache_wait_for_operation_activation(
684                 object, &op->op,
685                 __fscache_stat(&fscache_n_alloc_op_waits),
686                 __fscache_stat(&fscache_n_allocs_object_dead),
687                 fscache_do_cancel_retrieval);
688         if (ret < 0)
689                 goto error;
690
691         /* ask the cache to honour the operation */
692         fscache_stat(&fscache_n_cop_allocate_page);
693         ret = object->cache->ops->allocate_page(op, page, gfp);
694         fscache_stat_d(&fscache_n_cop_allocate_page);
695
696 error:
697         if (ret == -ERESTARTSYS)
698                 fscache_stat(&fscache_n_allocs_intr);
699         else if (ret < 0)
700                 fscache_stat(&fscache_n_allocs_nobufs);
701         else
702                 fscache_stat(&fscache_n_allocs_ok);
703
704         fscache_put_retrieval(op);
705         _leave(" = %d", ret);
706         return ret;
707
708 nobufs_unlock_dec:
709         wake_cookie = __fscache_unuse_cookie(cookie);
710 nobufs_unlock:
711         spin_unlock(&cookie->lock);
712         kfree(op);
713         if (wake_cookie)
714                 __fscache_wake_unused_cookie(cookie);
715 nobufs:
716         fscache_stat(&fscache_n_allocs_nobufs);
717         _leave(" = -ENOBUFS");
718         return -ENOBUFS;
719 }
720 EXPORT_SYMBOL(__fscache_alloc_page);
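/*
 * Illustrative sketch (hypothetical example_* names): fscache_alloc_page() is
 * useful when a page is about to be completely overwritten, so there is no
 * point reading stale data back from the cache first.
 */
#if 0 /* example only */
static int example_prepare_full_overwrite(struct fscache_cookie *cookie,
					  struct page *page)
{
	int ret = fscache_alloc_page(cookie, page, GFP_KERNEL);

	if (ret == -ENOBUFS)
		return 0;	/* not cached - carry on without the cache */
	return ret;		/* 0 on success; the page is now marked */
}
#endif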
721
722 /*
723  * Unmark pages allocated in the readahead code path (via
724  * fscache_read_or_alloc_pages) after delegating to the base filesystem
725  */
726 void __fscache_readpages_cancel(struct fscache_cookie *cookie,
727                                 struct list_head *pages)
728 {
729         struct page *page;
730
731         list_for_each_entry(page, pages, lru) {
732                 if (PageFsCache(page))
733                         __fscache_uncache_page(cookie, page);
734         }
735 }
736 EXPORT_SYMBOL(__fscache_readpages_cancel);
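/*
 * Illustrative sketch (hypothetical example_* names): if the netfs read that
 * was supposed to populate readahead pages fails, the PG_fscache marks left
 * by fscache_read_or_alloc_pages() must be dropped before the pages are
 * discarded.
 */
#if 0 /* example only */
static void example_readpages_failed(struct fscache_cookie *cookie,
				     struct list_head *pages)
{
	fscache_readpages_cancel(cookie, pages);
}
#endif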
737
738 /*
739  * release a write op reference
740  */
741 static void fscache_release_write_op(struct fscache_operation *_op)
742 {
743         _enter("{OP%x}", _op->debug_id);
744 }
745
746 /*
747  * perform the background storage of a page into the cache
748  */
749 static void fscache_write_op(struct fscache_operation *_op)
750 {
751         struct fscache_storage *op =
752                 container_of(_op, struct fscache_storage, op);
753         struct fscache_object *object = op->op.object;
754         struct fscache_cookie *cookie;
755         struct page *page;
756         unsigned n;
757         void *results[1];
758         int ret;
759
760         _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
761
762         spin_lock(&object->lock);
763         cookie = object->cookie;
764
765         if (!fscache_object_is_active(object)) {
766                 /* If we get here, then the on-disk cache object likely no longer
767                  * exists, so we should just cancel this write operation.
768                  */
769                 spin_unlock(&object->lock);
770                 fscache_op_complete(&op->op, false);
771                 _leave(" [inactive]");
772                 return;
773         }
774
775         if (!cookie) {
776                 /* If we get here, then the cookie belonging to the object was
777                  * detached, probably by the cookie being withdrawn due to
778                  * memory pressure, which means that the pages we might write
779                  * to the cache no longer exist - therefore, we can just
780                  * cancel this write operation.
781                  */
782                 spin_unlock(&object->lock);
783                 fscache_op_complete(&op->op, false);
784                 _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
785                        _op->flags, _op->state, object->state->short_name,
786                        object->flags);
787                 return;
788         }
789
790         spin_lock(&cookie->stores_lock);
791
792         fscache_stat(&fscache_n_store_calls);
793
794         /* find a page to store */
795         page = NULL;
796         n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
797                                        FSCACHE_COOKIE_PENDING_TAG);
798         if (n != 1)
799                 goto superseded;
800         page = results[0];
801         _debug("gang %d [%lx]", n, page->index);
802         if (page->index > op->store_limit) {
803                 fscache_stat(&fscache_n_store_pages_over_limit);
804                 goto superseded;
805         }
806
807         radix_tree_tag_set(&cookie->stores, page->index,
808                            FSCACHE_COOKIE_STORING_TAG);
809         radix_tree_tag_clear(&cookie->stores, page->index,
810                              FSCACHE_COOKIE_PENDING_TAG);
811
812         spin_unlock(&cookie->stores_lock);
813         spin_unlock(&object->lock);
814
815         fscache_stat(&fscache_n_store_pages);
816         fscache_stat(&fscache_n_cop_write_page);
817         ret = object->cache->ops->write_page(op, page);
818         fscache_stat_d(&fscache_n_cop_write_page);
819         fscache_end_page_write(object, page);
820         if (ret < 0) {
821                 fscache_abort_object(object);
822                 fscache_op_complete(&op->op, true);
823         } else {
824                 fscache_enqueue_operation(&op->op);
825         }
826
827         _leave("");
828         return;
829
830 superseded:
831         /* this writer is going away and there aren't any more things to
832          * write */
833         _debug("cease");
834         spin_unlock(&cookie->stores_lock);
835         clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
836         spin_unlock(&object->lock);
837         fscache_op_complete(&op->op, true);
838         _leave("");
839 }
840
841 /*
842  * Clear the pages pending writing for invalidation
843  */
844 void fscache_invalidate_writes(struct fscache_cookie *cookie)
845 {
846         struct page *page;
847         void *results[16];
848         int n, i;
849
850         _enter("");
851
852         for (;;) {
853                 spin_lock(&cookie->stores_lock);
854                 n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
855                                                ARRAY_SIZE(results),
856                                                FSCACHE_COOKIE_PENDING_TAG);
857                 if (n == 0) {
858                         spin_unlock(&cookie->stores_lock);
859                         break;
860                 }
861
862                 for (i = n - 1; i >= 0; i--) {
863                         page = results[i];
864                         radix_tree_delete(&cookie->stores, page->index);
865                 }
866
867                 spin_unlock(&cookie->stores_lock);
868
869                 for (i = n - 1; i >= 0; i--)
870                         page_cache_release(results[i]);
871         }
872
873         _leave("");
874 }
875
876 /*
877  * request a page be stored in the cache
878  * - returns:
879  *   -ENOMEM    - out of memory, nothing done
880  *   -ENOBUFS   - no backing object available in which to cache the page
881  *   0          - dispatched a write - it'll call end_io_func() when finished
882  *
883  * if the cookie still has a backing object at this point, that object can be
884  * in one of a few states with respect to storage processing:
885  *
886  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
887  *      set)
888  *
889  *      (a) no writes yet
890  *
891  *      (b) writes deferred till post-creation (mark page for writing and
892  *          return immediately)
893  *
894  *  (2) negative lookup, object created, initial fill being made from netfs
895  *
896  *      (a) fill point not yet reached this page (mark page for writing and
897  *          return)
898  *
899  *      (b) fill point passed this page (queue op to store this page)
900  *
901  *  (3) object extant (queue op to store this page)
902  *
903  * any other state is invalid
904  */
905 int __fscache_write_page(struct fscache_cookie *cookie,
906                          struct page *page,
907                          gfp_t gfp)
908 {
909         struct fscache_storage *op;
910         struct fscache_object *object;
911         bool wake_cookie = false;
912         int ret;
913
914         _enter("%p,%x,", cookie, (u32) page->flags);
915
916         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
917         ASSERT(PageFsCache(page));
918
919         fscache_stat(&fscache_n_stores);
920
921         if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
922                 _leave(" = -ENOBUFS [invalidating]");
923                 return -ENOBUFS;
924         }
925
926         op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
927         if (!op)
928                 goto nomem;
929
930         fscache_operation_init(&op->op, fscache_write_op,
931                                fscache_release_write_op);
932         op->op.flags = FSCACHE_OP_ASYNC |
933                 (1 << FSCACHE_OP_WAITING) |
934                 (1 << FSCACHE_OP_UNUSE_COOKIE);
935
936         ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
937         if (ret < 0)
938                 goto nomem_free;
939
940         ret = -ENOBUFS;
941         spin_lock(&cookie->lock);
942
943         if (!fscache_cookie_enabled(cookie) ||
944             hlist_empty(&cookie->backing_objects))
945                 goto nobufs;
946         object = hlist_entry(cookie->backing_objects.first,
947                              struct fscache_object, cookie_link);
948         if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
949                 goto nobufs;
950
951         /* add the page to the pending-storage radix tree on the backing
952          * object */
953         spin_lock(&object->lock);
954         spin_lock(&cookie->stores_lock);
955
956         _debug("store limit %llx", (unsigned long long) object->store_limit);
957
958         ret = radix_tree_insert(&cookie->stores, page->index, page);
959         if (ret < 0) {
960                 if (ret == -EEXIST)
961                         goto already_queued;
962                 _debug("insert failed %d", ret);
963                 goto nobufs_unlock_obj;
964         }
965
966         radix_tree_tag_set(&cookie->stores, page->index,
967                            FSCACHE_COOKIE_PENDING_TAG);
968         page_cache_get(page);
969
970         /* we only want one writer at a time, but we do need to queue new
971          * writers after exclusive ops */
972         if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
973                 goto already_pending;
974
975         spin_unlock(&cookie->stores_lock);
976         spin_unlock(&object->lock);
977
978         op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
979         op->store_limit = object->store_limit;
980
981         __fscache_use_cookie(cookie);
982         if (fscache_submit_op(object, &op->op) < 0)
983                 goto submit_failed;
984
985         spin_unlock(&cookie->lock);
986         radix_tree_preload_end();
987         fscache_stat(&fscache_n_store_ops);
988         fscache_stat(&fscache_n_stores_ok);
989
990         /* the work queue now carries its own ref on the object */
991         fscache_put_operation(&op->op);
992         _leave(" = 0");
993         return 0;
994
995 already_queued:
996         fscache_stat(&fscache_n_stores_again);
997 already_pending:
998         spin_unlock(&cookie->stores_lock);
999         spin_unlock(&object->lock);
1000         spin_unlock(&cookie->lock);
1001         radix_tree_preload_end();
1002         kfree(op);
1003         fscache_stat(&fscache_n_stores_ok);
1004         _leave(" = 0");
1005         return 0;
1006
1007 submit_failed:
1008         spin_lock(&cookie->stores_lock);
1009         radix_tree_delete(&cookie->stores, page->index);
1010         spin_unlock(&cookie->stores_lock);
1011         wake_cookie = __fscache_unuse_cookie(cookie);
1012         page_cache_release(page);
1013         ret = -ENOBUFS;
1014         goto nobufs;
1015
1016 nobufs_unlock_obj:
1017         spin_unlock(&cookie->stores_lock);
1018         spin_unlock(&object->lock);
1019 nobufs:
1020         spin_unlock(&cookie->lock);
1021         radix_tree_preload_end();
1022         kfree(op);
1023         if (wake_cookie)
1024                 __fscache_wake_unused_cookie(cookie);
1025         fscache_stat(&fscache_n_stores_nobufs);
1026         _leave(" = -ENOBUFS");
1027         return -ENOBUFS;
1028
1029 nomem_free:
1030         kfree(op);
1031 nomem:
1032         fscache_stat(&fscache_n_stores_oom);
1033         _leave(" = -ENOMEM");
1034         return -ENOMEM;
1035 }
1036 EXPORT_SYMBOL(__fscache_write_page);
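/*
 * Illustrative sketch (hypothetical example_* names): once a page has been
 * read from the server (or otherwise brought uptodate), a netfs hands it to
 * the cache with the fscache_write_page() wrapper.  On failure the page must
 * be uncached so that the PG_fscache mark does not linger on a page the
 * cache will never store.
 */
#if 0 /* example only */
static void example_store_page(struct fscache_cookie *cookie, struct page *page)
{
	int ret;

	if (!PageFsCache(page))
		return;		/* the cache never reserved space for it */

	ret = fscache_write_page(cookie, page, GFP_KERNEL);
	if (ret != 0)
		fscache_uncache_page(cookie, page);
}
#endif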
1037
1038 /*
1039  * remove a page from the cache
1040  */
1041 void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
1042 {
1043         struct fscache_object *object;
1044
1045         _enter(",%p", page);
1046
1047         ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
1048         ASSERTCMP(page, !=, NULL);
1049
1050         fscache_stat(&fscache_n_uncaches);
1051
1052         /* cache withdrawal may beat us to it */
1053         if (!PageFsCache(page))
1054                 goto done;
1055
1056         /* get the object */
1057         spin_lock(&cookie->lock);
1058
1059         if (hlist_empty(&cookie->backing_objects)) {
1060                 ClearPageFsCache(page);
1061                 goto done_unlock;
1062         }
1063
1064         object = hlist_entry(cookie->backing_objects.first,
1065                              struct fscache_object, cookie_link);
1066
1067         /* there might now be stuff on disk we could read */
1068         clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
1069
1070         /* only invoke the cache backend if we managed to mark the page
1071          * uncached here; this deals with synchronisation vs withdrawal */
1072         if (TestClearPageFsCache(page) &&
1073             object->cache->ops->uncache_page) {
1074                 /* the cache backend releases the cookie lock */
1075                 fscache_stat(&fscache_n_cop_uncache_page);
1076                 object->cache->ops->uncache_page(object, page);
1077                 fscache_stat_d(&fscache_n_cop_uncache_page);
1078                 goto done;
1079         }
1080
1081 done_unlock:
1082         spin_unlock(&cookie->lock);
1083 done:
1084         _leave("");
1085 }
1086 EXPORT_SYMBOL(__fscache_uncache_page);
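/*
 * Illustrative sketch (hypothetical example_* names): a netfs
 * ->invalidatepage() handler typically waits for any store to complete and
 * then uncaches the page when the whole page is being invalidated.
 */
#if 0 /* example only */
static void example_invalidatepage(struct fscache_cookie *cookie,
				   struct page *page, unsigned int offset)
{
	if (offset == 0 && PageFsCache(page)) {
		fscache_wait_on_page_write(cookie, page);
		fscache_uncache_page(cookie, page);
	}
}
#endif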
1087
1088 /**
1089  * fscache_mark_page_cached - Mark a page as being cached
1090  * @op: The retrieval op pages are being marked for
1091  * @page: The page to be marked
1092  *
1093  * Mark a netfs page as being cached.  After this is called, the netfs
1094  * must call fscache_uncache_page() to remove the mark.
1095  */
1096 void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
1097 {
1098         struct fscache_cookie *cookie = op->op.object->cookie;
1099
1100 #ifdef CONFIG_FSCACHE_STATS
1101         atomic_inc(&fscache_n_marks);
1102 #endif
1103
1104         _debug("- mark %p{%lx}", page, page->index);
1105         if (TestSetPageFsCache(page)) {
1106                 static bool once_only;
1107                 if (!once_only) {
1108                         once_only = true;
1109                         pr_warn("Cookie type %s marked page %lx multiple times\n",
1110                                 cookie->def->name, page->index);
1111                 }
1112         }
1113
1114         if (cookie->def->mark_page_cached)
1115                 cookie->def->mark_page_cached(cookie->netfs_data,
1116                                               op->mapping, page);
1117 }
1118 EXPORT_SYMBOL(fscache_mark_page_cached);
1119
1120 /**
1121  * fscache_mark_pages_cached - Mark pages as being cached
1122  * @op: The retrieval op pages are being marked for
1123  * @pagevec: The pages to be marked
1124  *
1125  * Mark a bunch of netfs pages as being cached.  After this is called,
1126  * the netfs must call fscache_uncache_page() to remove the mark.
1127  */
1128 void fscache_mark_pages_cached(struct fscache_retrieval *op,
1129                                struct pagevec *pagevec)
1130 {
1131         unsigned long loop;
1132
1133         for (loop = 0; loop < pagevec->nr; loop++)
1134                 fscache_mark_page_cached(op, pagevec->pages[loop]);
1135
1136         pagevec_reinit(pagevec);
1137 }
1138 EXPORT_SYMBOL(fscache_mark_pages_cached);
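/*
 * Illustrative sketch (hypothetical example_* names): unlike most of this
 * file, the two marking helpers above are for cache backends.  A backend's
 * ->allocate_page() or ->read_or_alloc_page() operation calls them once it
 * has committed backing store to a netfs page.
 */
#if 0 /* example only */
static int example_backend_allocate_page(struct fscache_retrieval *op,
					 struct page *page, gfp_t gfp)
{
	/* ... reserve a backing block for the page here ... */

	/* tell FS-Cache (and, through it, the netfs) that the page is now
	 * backed by the cache and must eventually be uncached */
	fscache_mark_page_cached(op, page);
	return 0;
}
#endif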
1139
1140 /*
1141  * Uncache all the pages in an inode that are marked PG_fscache, assuming them
1142  * to be associated with the given cookie.
1143  */
1144 void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
1145                                        struct inode *inode)
1146 {
1147         struct address_space *mapping = inode->i_mapping;
1148         struct pagevec pvec;
1149         pgoff_t next;
1150         int i;
1151
1152         _enter("%p,%p", cookie, inode);
1153
1154         if (!mapping || mapping->nrpages == 0) {
1155                 _leave(" [no pages]");
1156                 return;
1157         }
1158
1159         pagevec_init(&pvec, 0);
1160         next = 0;
1161         do {
1162                 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
1163                         break;
1164                 for (i = 0; i < pagevec_count(&pvec); i++) {
1165                         struct page *page = pvec.pages[i];
1166                         next = page->index;
1167                         if (PageFsCache(page)) {
1168                                 __fscache_wait_on_page_write(cookie, page);
1169                                 __fscache_uncache_page(cookie, page);
1170                         }
1171                 }
1172                 pagevec_release(&pvec);
1173                 cond_resched();
1174         } while (++next);
1175
1176         _leave("");
1177 }
1178 EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
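/*
 * Illustrative sketch (hypothetical example_* names): a netfs can use
 * fscache_uncache_all_inode_pages() to strip every PG_fscache mark from an
 * inode's pagecache in one go, for instance when the cache cookie is being
 * retired after an out-of-band change to the file.
 */
#if 0 /* example only */
static void example_retire_inode_cookie(struct fscache_cookie *cookie,
					struct inode *inode)
{
	fscache_uncache_all_inode_pages(cookie, inode);
	fscache_relinquish_cookie(cookie, true /* retire */);
}
#endif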