fs/fuse/dev.c  [firefly-linux-kernel-4.4.55.git, merge of 'linux-linaro-lsk' into 'linux-linaro-lsk-android']
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>
#include <linux/freezer.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

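/* Reset a request to a pristine state and attach its page arrays */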
static void fuse_request_init(struct fuse_req *req, struct page **pages,
                              struct fuse_page_desc *page_descs,
                              unsigned npages)
{
        memset(req, 0, sizeof(*req));
        memset(pages, 0, sizeof(*pages) * npages);
        memset(page_descs, 0, sizeof(*page_descs) * npages);
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
}

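/*
 * Allocate a request.  For small requests the page arrays embedded in
 * the request itself are used; larger ones get separate allocations.
 */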
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
        if (req) {
                struct page **pages;
                struct fuse_page_desc *page_descs;

                if (npages <= FUSE_REQ_INLINE_PAGES) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
                } else {
                        pages = kmalloc(sizeof(struct page *) * npages, flags);
                        page_descs = kmalloc(sizeof(struct fuse_page_desc) *
                                             npages, flags);
                }

                if (!pages || !page_descs) {
                        kfree(pages);
                        kfree(page_descs);
                        kmem_cache_free(fuse_req_cachep, req);
                        return NULL;
                }

                fuse_request_init(req, pages, page_descs, npages);
        }
        return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
        if (req->pages != req->inline_pages) {
                kfree(req->pages);
                kfree(req->page_descs);
        }
        kmem_cache_free(fuse_req_cachep, req);
}

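/* Block all signals except SIGKILL, so only a fatal signal can wake us */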
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
        req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
        req->in.h.pid = current->pid;
}

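/*
 * Allocation must wait while the connection is uninitialized; background
 * requests must additionally wait while fc->blocked is set.
 */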
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
        return !fc->initialized || (for_background && fc->blocked);
}

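/*
 * Allocate a request and fill in the caller's credentials.  May block
 * until the connection is initialized or unblocked; only SIGKILL can
 * interrupt the wait.
 */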
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                       bool for_background)
{
        struct fuse_req *req;
        int err;
        atomic_inc(&fc->num_waiting);

        if (fuse_block_alloc(fc, for_background)) {
                sigset_t oldset;
                int intr;

                block_sigs(&oldset);
                intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background));
                restore_sigs(&oldset);
                err = -EINTR;
                if (intr)
                        goto out;
        }

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
                if (for_background)
                        wake_up(&fc->blocked_waitq);
                goto out;
        }

        fuse_req_init_context(req);
        req->waiting = 1;
        req->background = for_background;
        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
        return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
                                             unsigned npages)
{
        return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                                             struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        req->waiting = 1;
        req->background = 0;
        return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (unlikely(req->background)) {
                        /*
                         * We get here in the unlikely case that a background
                         * request was allocated but not sent
                         */
                        spin_lock(&fc->lock);
                        if (!fc->blocked)
                                wake_up(&fc->blocked_waitq);
                        spin_unlock(&fc->lock);
                }

                if (req->waiting)
                        atomic_dec(&fc->num_waiting);

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;

        return fc->reqctr;
}

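/*
 * Add a request to the pending list and wake up any reader of the
 * device.  Called with fc->lock held.
 */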
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        if (!req->waiting) {
                req->waiting = 1;
                atomic_inc(&fc->num_waiting);
        }
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->forget_list_tail->next = forget;
                fc->forget_list_tail = forget;
                wake_up(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
        spin_unlock(&fc->lock);
}

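/*
 * Move queued background requests onto the pending list until the
 * active background limit is reached.  Called with fc->lock held.
 */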
static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
        }
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                req->background = 0;

                if (fc->num_background == fc->max_background)
                        fc->blocked = 0;

                /* Wake up next waiter, if any */
                if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);

                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
        list_add_tail(&req->intr_entry, &fc->interrupts);
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

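/*
 * Wait for the answer to a request.  A signal first queues an INTERRUPT
 * to userspace; a later fatal signal can still dequeue the request if it
 * has not yet been read.  Forced requests are always waited out.
 */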
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                req->interrupted = 1;
                if (req->state == FUSE_REQ_SENT)
                        queue_interrupt(fc, req);
        }

        if (!req->force) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                /* Request is not yet in userspace, bail out */
                if (req->state == FUSE_REQ_PENDING) {
                        list_del(&req->list);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        spin_unlock(&fc->lock);

        while (req->state != FUSE_REQ_FINISHED)
                wait_event_freezable(req->waitq,
                                     req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);

        if (!req->aborted)
                return;

 aborted:
        BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
}

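/* Queue a request and wait synchronously for the answer */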
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        BUG_ON(req->background);
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        __fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

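/*
 * Queue a background request, marking the bdi congested when the
 * congestion threshold is reached.  Called with fc->lock held.
 */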
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)
{
        BUG_ON(!req->background);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
{
        int err = -ENODEV;

        req->isreply = 0;
        req->in.h.unique = unique;
        spin_lock(&fc->lock);
        if (fc->connected) {
                queue_request(fc, req);
                err = 0;
        }
        spin_unlock(&fc->lock);

        return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait_locked(fc, req);
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        inarg.nlookup = 1;
        req = fuse_get_req_nofail_nopages(fc, file);
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->isreply = 0;
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

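/*
 * State for copying data between the kernel and the userspace buffer,
 * which may be a plain iovec or a set of pipe buffers (for splice).
 */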
struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
        unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (!cs->write) {
                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
                } else {
                        kunmap(buf->page);
                        buf->len = PAGE_SIZE - cs->len;
                }
                cs->currbuf = NULL;
                cs->mapaddr = NULL;
        } else if (cs->mapaddr) {
                kunmap(cs->pg);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = buf->ops->confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        struct page *page;

                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->mapaddr = kmap(page);
                        cs->buf = cs->mapaddr;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                if (!cs->seglen) {
                        BUG_ON(!cs->nr_segs);
                        cs->seglen = cs->iov[0].iov_len;
                        cs->addr = (unsigned long) cs->iov[0].iov_base;
                        cs->iov++;
                        cs->nr_segs--;
                }
                err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
                offset = cs->addr % PAGE_SIZE;
                cs->mapaddr = kmap(cs->pg);
                cs->buf = cs->mapaddr + offset;
                cs->len = min(PAGE_SIZE - offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

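/*
 * A page may only be stolen into the page cache if nobody else holds
 * a reference to it and it carries no unexpected flags.
 */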
static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}

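/*
 * Try to steal the pipe buffer's page into the request's page cache
 * slot instead of copying it (used when cs->move_pages is set).
 * Returns 1 if the page could not be moved and must be copied instead.
 */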
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (buf->ops->steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (WARN_ON(!PageUptodate(newpage)))
                return -EIO;

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);
                return err;
        }

        page_cache_get(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->fc->lock);
        if (cs->req->aborted)
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->fc->lock);

        if (err) {
                unlock_page(newpage);
                page_cache_release(newpage);
                return err;
        }

        unlock_page(oldpage);
        page_cache_release(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
        cs->buf = cs->mapaddr + buf->offset;

        err = lock_request(cs->fc, cs->req);
        if (err)
                return err;

        return 1;
}

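/* Reference a request page from a pipe buffer instead of copying it */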
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        page_cache_get(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;
                unsigned offset = req->page_descs[i].offset;
                unsigned count = min(nbytes, req->page_descs[i].length);

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++)  {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

static int forget_pending(struct fuse_conn *fc)
{
        return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
        return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
                forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fc);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}

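/* Detach up to 'max' forget requests from the list; count them in *countp */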
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
                                               unsigned max,
                                               unsigned *countp)
{
        struct fuse_forget_link *head = fc->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;

        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fc->forget_list_head.next = *newhead;
        *newhead = NULL;
        if (fc->forget_list_head.next == NULL)
                fc->forget_list_tail = &fc->forget_list_head;

        if (countp != NULL)
                *countp = count;

        return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
__releases(fc->lock)
{
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        spin_unlock(&fc->lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
        int err;
        unsigned max_forgets;
        unsigned count;
        struct fuse_forget_link *head;
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        if (nbytes < ih.len) {
                spin_unlock(&fc->lock);
                return -EINVAL;
        }

        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fc, max_forgets, &count);
        spin_unlock(&fc->lock);

        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

        while (head) {
                struct fuse_forget_link *forget = head;

                if (!err) {
                        err = fuse_copy_one(cs, &forget->forget_one,
                                            sizeof(forget->forget_one));
                }
                head = forget->next;
                kfree(forget);
        }

        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
                            size_t nbytes)
__releases(fc->lock)
{
        if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fc, cs, nbytes);
        else
                return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (!request_pending(fc))
                goto err_unlock;

        if (!list_empty(&fc->interrupts)) {
                req = list_entry(fc->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fc, cs, nbytes, req);
        }

        if (forget_pending(fc)) {
                if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
                        return fuse_read_forget(fc, cs, nbytes);

                if (fc->forget_batch <= -8)
                        fc->forget_batch = 16;
        }

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (req->aborted) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                if (req->interrupted)
                        queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 1, iov, nr_segs);

        return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
                                   struct pipe_buffer *buf)
{
        return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = fuse_dev_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

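/*
 * Read a request into a pipe: fill a temporary pipe_buffer array via
 * fuse_dev_do_read() and then link the buffers into the pipe.
 */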
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int ret;
        int page_nr = 0;
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(in);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, fc, 1, NULL, 0);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
        if (ret < 0)
                goto out;

        ret = 0;
        pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
                goto out_unlock;
        }

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out_unlock;
        }

        while (page_nr < cs.nr_segs) {
                int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + newbuf;

                buf->page = bufs[page_nr].page;
                buf->offset = bufs[page_nr].offset;
                buf->len = bufs[page_nr].len;
                buf->ops = &fuse_dev_pipe_buf_ops;

                pipe->nrbufs++;
                page_nr++;
                ret += buf->len;

                if (pipe->files)
                        do_wakeup = 1;
        }

out_unlock:
        pipe_unlock(pipe);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

out:
        for (; page_nr < cs.nr_segs; page_nr++)
                page_cache_release(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)
{
        struct fuse_notify_delete_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

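/*
 * Handle FUSE_NOTIFY_STORE: write data supplied by the filesystem
 * directly into the page cache of the given inode.
 */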
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                             struct fuse_copy_state *cs)
{
        struct fuse_notify_store_out outarg;
        struct inode *inode;
        struct address_space *mapping;
        u64 nodeid;
        int err;
        pgoff_t index;
        unsigned int offset;
        unsigned int num;
        loff_t file_size;
        loff_t end;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto out_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto out_finish;

        err = -EINVAL;
        if (size - sizeof(outarg) != outarg.size)
                goto out_finish;

        nodeid = outarg.nodeid;

        down_read(&fc->killsb);

        err = -ENOENT;
        if (!fc->sb)
                goto out_up_killsb;

        inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
        if (!inode)
                goto out_up_killsb;

        mapping = inode->i_mapping;
        index = outarg.offset >> PAGE_CACHE_SHIFT;
        offset = outarg.offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {
                file_size = end;
                fuse_write_update_size(inode, file_size);
        }

        num = outarg.size;
        while (num) {
                struct page *page;
                unsigned int this_num;

                err = -ENOMEM;
                page = find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));
                if (!page)
                        goto out_iput;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 && (num != 0 || file_size == end))
                        SetPageUptodate(page);
                unlock_page(page);
                page_cache_release(page);

                if (err)
                        goto out_iput;

                num -= this_num;
                offset = 0;
                index++;
        }

        err = 0;

out_iput:
        iput(inode);
out_up_killsb:
        up_read(&fc->killsb);
out_finish:
        fuse_copy_finish(cs);
        return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
        release_pages(req->pages, req->num_pages, 0);
}

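/*
 * Send the requested range of an inode's cached pages back to the
 * filesystem in a FUSE_NOTIFY_REPLY request.
 */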
1635 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1636                          struct fuse_notify_retrieve_out *outarg)
1637 {
1638         int err;
1639         struct address_space *mapping = inode->i_mapping;
1640         struct fuse_req *req;
1641         pgoff_t index;
1642         loff_t file_size;
1643         unsigned int num;
1644         unsigned int offset;
1645         size_t total_len = 0;
1646         int num_pages;
1647
1648         offset = outarg->offset & ~PAGE_CACHE_MASK;
1649         file_size = i_size_read(inode);
1650
1651         num = outarg->size;
1652         if (outarg->offset > file_size)
1653                 num = 0;
1654         else if (outarg->offset + num > file_size)
1655                 num = file_size - outarg->offset;
1656
1657         num_pages = (num + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1658         num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1659
1660         req = fuse_get_req(fc, num_pages);
1661         if (IS_ERR(req))
1662                 return PTR_ERR(req);
1663
1664         req->in.h.opcode = FUSE_NOTIFY_REPLY;
1665         req->in.h.nodeid = outarg->nodeid;
1666         req->in.numargs = 2;
1667         req->in.argpages = 1;
1668         req->page_descs[0].offset = offset;
1669         req->end = fuse_retrieve_end;
1670
1671         index = outarg->offset >> PAGE_CACHE_SHIFT;
1672
1673         while (num && req->num_pages < num_pages) {
1674                 struct page *page;
1675                 unsigned int this_num;
1676
1677                 page = find_get_page(mapping, index);
1678                 if (!page)
1679                         break;
1680
1681                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1682                 req->pages[req->num_pages] = page;
1683                 req->page_descs[req->num_pages].length = this_num;
1684                 req->num_pages++;
1685
1686                 offset = 0;
1687                 num -= this_num;
1688                 total_len += this_num;
1689                 index++;
1690         }
1691         req->misc.retrieve_in.offset = outarg->offset;
1692         req->misc.retrieve_in.size = total_len;
1693         req->in.args[0].size = sizeof(req->misc.retrieve_in);
1694         req->in.args[0].value = &req->misc.retrieve_in;
1695         req->in.args[1].size = total_len;
1696
1697         err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1698         if (err)
1699                 fuse_retrieve_end(fc, req);
1700
1701         return err;
1702 }
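
/*
 * fuse_retrieve() is the kernel half of the FUSE_NOTIFY_RETRIEVE round
 * trip: the daemon asks for a range of cached data, and the kernel
 * answers with a FUSE_NOTIFY_REPLY request carrying
 * fuse_notify_retrieve_in plus up to FUSE_MAX_PAGES_PER_REQ pages of
 * page cache contents.  The daemon matches the reply to its
 * notification via outarg->notify_unique.
 */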
1703
1704 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1705                                 struct fuse_copy_state *cs)
1706 {
1707         struct fuse_notify_retrieve_out outarg;
1708         struct inode *inode;
1709         int err;
1710
1711         err = -EINVAL;
1712         if (size != sizeof(outarg))
1713                 goto copy_finish;
1714
1715         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1716         if (err)
1717                 goto copy_finish;
1718
1719         fuse_copy_finish(cs);
1720
1721         down_read(&fc->killsb);
1722         err = -ENOENT;
1723         if (fc->sb) {
1724                 u64 nodeid = outarg.nodeid;
1725
1726                 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1727                 if (inode) {
1728                         err = fuse_retrieve(fc, inode, &outarg);
1729                         iput(inode);
1730                 }
1731         }
1732         up_read(&fc->killsb);
1733
1734         return err;
1735
1736 copy_finish:
1737         fuse_copy_finish(cs);
1738         return err;
1739 }
1740
1741 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1742                        unsigned int size, struct fuse_copy_state *cs)
1743 {
1744         switch (code) {
1745         case FUSE_NOTIFY_POLL:
1746                 return fuse_notify_poll(fc, size, cs);
1747
1748         case FUSE_NOTIFY_INVAL_INODE:
1749                 return fuse_notify_inval_inode(fc, size, cs);
1750
1751         case FUSE_NOTIFY_INVAL_ENTRY:
1752                 return fuse_notify_inval_entry(fc, size, cs);
1753
1754         case FUSE_NOTIFY_STORE:
1755                 return fuse_notify_store(fc, size, cs);
1756
1757         case FUSE_NOTIFY_RETRIEVE:
1758                 return fuse_notify_retrieve(fc, size, cs);
1759
1760         case FUSE_NOTIFY_DELETE:
1761                 return fuse_notify_delete(fc, size, cs);
1762
1763         default:
1764                 fuse_copy_finish(cs);
1765                 return -EINVAL;
1766         }
1767 }
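
/*
 * Example (illustrative, userspace side; fuse_fd and nodeid are
 * placeholders): a notification is emitted by writing a "reply" with a
 * zero unique ID and the notify code in the error field.  Invalidating
 * an inode's cached data, for instance, might look like this:
 *
 *	struct fuse_out_header hdr;
 *	struct fuse_notify_inval_inode_out arg = {
 *		.ino = nodeid, .off = 0, .len = -1,
 *	};
 *	hdr.unique = 0;
 *	hdr.error  = FUSE_NOTIFY_INVAL_INODE;
 *	hdr.len    = sizeof(hdr) + sizeof(arg);
 *	(hdr and arg are then written in a single writev() call)
 */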
1768
1769 /* Look up a request on the processing list by its unique ID */
1770 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1771 {
1772         struct list_head *entry;
1773
1774         list_for_each(entry, &fc->processing) {
1775                 struct fuse_req *req;
1776                 req = list_entry(entry, struct fuse_req, list);
1777                 if (req->in.h.unique == unique || req->intr_unique == unique)
1778                         return req;
1779         }
1780         return NULL;
1781 }
1782
1783 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1784                          unsigned nbytes)
1785 {
1786         unsigned reqsize = sizeof(struct fuse_out_header);
1787
1788         if (out->h.error)
1789                 return nbytes != reqsize ? -EINVAL : 0;
1790
1791         reqsize += len_args(out->numargs, out->args);
1792
1793         if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1794                 return -EINVAL;
1795         else if (reqsize > nbytes) {
1796                 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1797                 unsigned diffsize = reqsize - nbytes;
1798                 if (diffsize > lastarg->size)
1799                         return -EINVAL;
1800                 lastarg->size -= diffsize;
1801         }
1802         return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1803                               out->page_zeroing);
1804 }
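
/*
 * Note on the argvar case above: some replies, such as FUSE_READ, may
 * legitimately return fewer bytes than requested.  When out->argvar is
 * set, the last argument is therefore allowed to shrink to match what
 * the daemon actually wrote; a longer-than-expected reply is always
 * rejected.
 */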
1805
1806 /*
1807  * Write a single reply to a request.  First the header is copied from
1808  * the write buffer.  The request is then looked up on the processing
1809  * list by the unique ID found in the header.  If found, the request
1810  * is removed from the list and the rest of the buffer is copied to
1811  * it, after which the request is finished by calling request_end().
1812  */
1813 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1814                                  struct fuse_copy_state *cs, size_t nbytes)
1815 {
1816         int err;
1817         struct fuse_req *req;
1818         struct fuse_out_header oh;
1819
1820         if (nbytes < sizeof(struct fuse_out_header))
1821                 return -EINVAL;
1822
1823         err = fuse_copy_one(cs, &oh, sizeof(oh));
1824         if (err)
1825                 goto err_finish;
1826
1827         err = -EINVAL;
1828         if (oh.len != nbytes)
1829                 goto err_finish;
1830
1831         /*
1832          * A zero oh.unique indicates an unsolicited notification message;
1833          * in that case oh.error carries the notification code.
1834          */
1835         if (!oh.unique) {
1836                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1837                 return err ? err : nbytes;
1838         }
1839
1840         err = -EINVAL;
1841         if (oh.error <= -1000 || oh.error > 0)
1842                 goto err_finish;
1843
1844         spin_lock(&fc->lock);
1845         err = -ENOENT;
1846         if (!fc->connected)
1847                 goto err_unlock;
1848
1849         req = request_find(fc, oh.unique);
1850         if (!req)
1851                 goto err_unlock;
1852
1853         if (req->aborted) {
1854                 spin_unlock(&fc->lock);
1855                 fuse_copy_finish(cs);
1856                 spin_lock(&fc->lock);
1857                 request_end(fc, req);
1858                 return -ENOENT;
1859         }
1860         /* Is it an interrupt reply? */
1861         if (req->intr_unique == oh.unique) {
1862                 err = -EINVAL;
1863                 if (nbytes != sizeof(struct fuse_out_header))
1864                         goto err_unlock;
1865
1866                 if (oh.error == -ENOSYS)
1867                         fc->no_interrupt = 1;
1868                 else if (oh.error == -EAGAIN)
1869                         queue_interrupt(fc, req);
1870
1871                 spin_unlock(&fc->lock);
1872                 fuse_copy_finish(cs);
1873                 return nbytes;
1874         }
1875
1876         req->state = FUSE_REQ_WRITING;
1877         list_move(&req->list, &fc->io);
1878         req->out.h = oh;
1879         req->locked = 1;
1880         cs->req = req;
1881         if (!req->out.page_replace)
1882                 cs->move_pages = 0;
1883         spin_unlock(&fc->lock);
1884
1885         err = copy_out_args(cs, &req->out, nbytes);
1886         fuse_copy_finish(cs);
1887
1888         spin_lock(&fc->lock);
1889         req->locked = 0;
1890         if (!err) {
1891                 if (req->aborted)
1892                         err = -ENOENT;
1893         } else if (!req->aborted)
1894                 req->out.h.error = -EIO;
1895         request_end(fc, req);
1896
1897         return err ? err : nbytes;
1898
1899  err_unlock:
1900         spin_unlock(&fc->lock);
1901  err_finish:
1902         fuse_copy_finish(cs);
1903         return err;
1904 }
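
/*
 * Example (illustrative, userspace side; fuse_fd, payload and
 * payload_len are placeholders): a daemon answers request 'unique'
 * with payload_len bytes of result data.  The whole reply must be
 * submitted in one write, since oh.len is checked against the number
 * of bytes actually written:
 *
 *	struct fuse_out_header hdr = {
 *		.len    = sizeof(hdr) + payload_len,
 *		.error  = 0,
 *		.unique = unique,
 *	};
 *	struct iovec iov[2] = {
 *		{ .iov_base = &hdr,    .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	writev(fuse_fd, iov, 2);
 */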
1905
1906 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1907                               unsigned long nr_segs, loff_t pos)
1908 {
1909         struct fuse_copy_state cs;
1910         struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1911         if (!fc)
1912                 return -EPERM;
1913
1914         fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1915
1916         return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1917 }
1918
1919 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1920                                      struct file *out, loff_t *ppos,
1921                                      size_t len, unsigned int flags)
1922 {
1923         unsigned nbuf;
1924         unsigned idx;
1925         struct pipe_buffer *bufs;
1926         struct fuse_copy_state cs;
1927         struct fuse_conn *fc;
1928         size_t rem;
1929         ssize_t ret;
1930
1931         fc = fuse_get_conn(out);
1932         if (!fc)
1933                 return -EPERM;
1934
1935         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1936         if (!bufs)
1937                 return -ENOMEM;
1938
1939         pipe_lock(pipe);
1940         nbuf = 0;
1941         rem = 0;
1942         for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1943                 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1944
1945         ret = -EINVAL;
1946         if (rem < len) {
1947                 pipe_unlock(pipe);
1948                 goto out;
1949         }
1950
1951         rem = len;
1952         while (rem) {
1953                 struct pipe_buffer *ibuf;
1954                 struct pipe_buffer *obuf;
1955
1956                 BUG_ON(nbuf >= pipe->buffers);
1957                 BUG_ON(!pipe->nrbufs);
1958                 ibuf = &pipe->bufs[pipe->curbuf];
1959                 obuf = &bufs[nbuf];
1960
1961                 if (rem >= ibuf->len) {
1962                         *obuf = *ibuf;
1963                         ibuf->ops = NULL;
1964                         pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1965                         pipe->nrbufs--;
1966                 } else {
1967                         ibuf->ops->get(pipe, ibuf);
1968                         *obuf = *ibuf;
1969                         obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1970                         obuf->len = rem;
1971                         ibuf->offset += obuf->len;
1972                         ibuf->len -= obuf->len;
1973                 }
1974                 nbuf++;
1975                 rem -= obuf->len;
1976         }
1977         pipe_unlock(pipe);
1978
1979         fuse_copy_init(&cs, fc, 0, NULL, nbuf);
1980         cs.pipebufs = bufs;
1981         cs.pipe = pipe;
1982
1983         if (flags & SPLICE_F_MOVE)
1984                 cs.move_pages = 1;
1985
1986         ret = fuse_dev_do_write(fc, &cs, len);
1987
1988         for (idx = 0; idx < nbuf; idx++) {
1989                 struct pipe_buffer *buf = &bufs[idx];
1990                 buf->ops->release(pipe, buf);
1991         }
1992 out:
1993         kfree(bufs);
1994         return ret;
1995 }
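
/*
 * Example (illustrative sketch; pipefd, fuse_fd, iov and total_len are
 * placeholders): a daemon can reply without copying by gathering the
 * reply into a pipe and splicing the pipe into /dev/fuse.
 * SPLICE_F_MOVE asks the kernel to move the pages instead of copying:
 *
 *	vmsplice(pipefd[1], iov, 2, 0);
 *	splice(pipefd[0], NULL, fuse_fd, NULL, total_len, SPLICE_F_MOVE);
 */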
1996
1997 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1998 {
1999         unsigned mask = POLLOUT | POLLWRNORM;
2000         struct fuse_conn *fc = fuse_get_conn(file);
2001         if (!fc)
2002                 return POLLERR;
2003
2004         poll_wait(file, &fc->waitq, wait);
2005
2006         spin_lock(&fc->lock);
2007         if (!fc->connected)
2008                 mask = POLLERR;
2009         else if (request_pending(fc))
2010                 mask |= POLLIN | POLLRDNORM;
2011         spin_unlock(&fc->lock);
2012
2013         return mask;
2014 }
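
/*
 * Example (illustrative; fuse_fd, buf and bufsize are placeholders):
 * the poll semantics above let a daemon multiplex /dev/fuse with other
 * descriptors; the device is always writable and becomes readable when
 * a request is pending:
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		res = read(fuse_fd, buf, bufsize);
 */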
2015
2016 /*
2017  * Abort all requests on the given list (pending or processing)
2018  *
2019  * This function releases and reacquires fc->lock
2020  */
2021 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2022 __releases(fc->lock)
2023 __acquires(fc->lock)
2024 {
2025         while (!list_empty(head)) {
2026                 struct fuse_req *req;
2027                 req = list_entry(head->next, struct fuse_req, list);
2028                 req->out.h.error = -ECONNABORTED;
2029                 request_end(fc, req);
2030                 spin_lock(&fc->lock);
2031         }
2032 }
2033
2034 /*
2035  * Abort requests under I/O
2036  *
2037  * Each request is marked aborted and finished, and its waiter is
2038  * woken up.  This makes request_wait_answer() wait until the request
2039  * is unlocked and then return.
2040  *
2041  * If the request is asynchronous, then the end function needs to be
2042  * called after waiting for the request to be unlocked (if it was
2043  * locked).
2044  */
2045 static void end_io_requests(struct fuse_conn *fc)
2046 __releases(fc->lock)
2047 __acquires(fc->lock)
2048 {
2049         while (!list_empty(&fc->io)) {
2050                 struct fuse_req *req =
2051                         list_entry(fc->io.next, struct fuse_req, list);
2052                 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
2053
2054                 req->aborted = 1;
2055                 req->out.h.error = -ECONNABORTED;
2056                 req->state = FUSE_REQ_FINISHED;
2057                 list_del_init(&req->list);
2058                 wake_up(&req->waitq);
2059                 if (end) {
2060                         req->end = NULL;
2061                         __fuse_get_request(req);
2062                         spin_unlock(&fc->lock);
2063                         wait_event(req->waitq, !req->locked);
2064                         end(fc, req);
2065                         fuse_put_request(fc, req);
2066                         spin_lock(&fc->lock);
2067                 }
2068         }
2069 }
2070
2071 static void end_queued_requests(struct fuse_conn *fc)
2072 __releases(fc->lock)
2073 __acquires(fc->lock)
2074 {
2075         fc->max_background = UINT_MAX;
2076         flush_bg_queue(fc);
2077         end_requests(fc, &fc->pending);
2078         end_requests(fc, &fc->processing);
2079         while (forget_pending(fc))
2080                 kfree(dequeue_forget(fc, 1, NULL));
2081 }
2082
2083 static void end_polls(struct fuse_conn *fc)
2084 {
2085         struct rb_node *p;
2086
2087         p = rb_first(&fc->polled_files);
2088
2089         while (p) {
2090                 struct fuse_file *ff;
2091                 ff = rb_entry(p, struct fuse_file, polled_node);
2092                 wake_up_interruptible_all(&ff->poll_wait);
2093
2094                 p = rb_next(p);
2095         }
2096 }
2097
2098 /*
2099  * Abort all requests.
2100  *
2101  * Emergency exit in case of a malicious or accidental deadlock, or
2102  * just a hung filesystem.
2103  *
2104  * The same effect is usually achievable through killing the
2105  * filesystem daemon and all users of the filesystem.  The exception
2106  * is the combination of an asynchronous request and the tricky
2107  * deadlock (see Documentation/filesystems/fuse.txt).
2108  *
2109  * While aborting, progression of requests from the pending and
2110  * processing lists onto the io list, and progression of new requests
2111  * onto the pending list, are prevented by fc->connected being false.
2112  *
2113  * Progression of requests under I/O to the processing list is
2114  * prevented by the req->aborted flag being true for these requests.
2115  * For this reason requests on the io list must be aborted first.
2116  */
2117 void fuse_abort_conn(struct fuse_conn *fc)
2118 {
2119         spin_lock(&fc->lock);
2120         if (fc->connected) {
2121                 fc->connected = 0;
2122                 fc->blocked = 0;
2123                 fc->initialized = 1;
2124                 end_io_requests(fc);
2125                 end_queued_requests(fc);
2126                 end_polls(fc);
2127                 wake_up_all(&fc->waitq);
2128                 wake_up_all(&fc->blocked_waitq);
2129                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
2130         }
2131         spin_unlock(&fc->lock);
2132 }
2133 EXPORT_SYMBOL_GPL(fuse_abort_conn);
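
/*
 * Example (illustrative): fuse_abort_conn() is typically reached from
 * the fusectl filesystem, where each connection has an "abort" file:
 *
 *	echo 1 > /sys/fs/fuse/connections/<dev>/abort
 */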
2134
2135 int fuse_dev_release(struct inode *inode, struct file *file)
2136 {
2137         struct fuse_conn *fc = fuse_get_conn(file);
2138         if (fc) {
2139                 spin_lock(&fc->lock);
2140                 fc->connected = 0;
2141                 fc->blocked = 0;
2142                 fc->initialized = 1;
2143                 end_queued_requests(fc);
2144                 end_polls(fc);
2145                 wake_up_all(&fc->blocked_waitq);
2146                 spin_unlock(&fc->lock);
2147                 fuse_conn_put(fc);
2148         }
2149
2150         return 0;
2151 }
2152 EXPORT_SYMBOL_GPL(fuse_dev_release);
2153
2154 static int fuse_dev_fasync(int fd, struct file *file, int on)
2155 {
2156         struct fuse_conn *fc = fuse_get_conn(file);
2157         if (!fc)
2158                 return -EPERM;
2159
2160         /* No locking - fasync_helper does its own locking */
2161         return fasync_helper(fd, file, on, &fc->fasync);
2162 }
2163
2164 const struct file_operations fuse_dev_operations = {
2165         .owner          = THIS_MODULE,
2166         .llseek         = no_llseek,
2167         .read           = do_sync_read,
2168         .aio_read       = fuse_dev_read,
2169         .splice_read    = fuse_dev_splice_read,
2170         .write          = do_sync_write,
2171         .aio_write      = fuse_dev_write,
2172         .splice_write   = fuse_dev_splice_write,
2173         .poll           = fuse_dev_poll,
2174         .release        = fuse_dev_release,
2175         .fasync         = fuse_dev_fasync,
2176 };
2177 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2178
2179 static struct miscdevice fuse_miscdevice = {
2180         .minor = FUSE_MINOR,
2181         .name  = "fuse",
2182         .fops = &fuse_dev_operations,
2183 };
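
/*
 * Example (illustrative; "mydaemon", "/mnt" and opts are placeholders):
 * registering the miscdevice above creates /dev/fuse.  A daemon obtains
 * a connection by opening it and passing the descriptor in the mount
 * options:
 *
 *	int fd = open("/dev/fuse", O_RDWR);
 *	snprintf(opts, sizeof(opts),
 *		 "fd=%d,rootmode=40000,user_id=0,group_id=0", fd);
 *	mount("mydaemon", "/mnt", "fuse", 0, opts);
 */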
2184
2185 int __init fuse_dev_init(void)
2186 {
2187         int err = -ENOMEM;
2188         fuse_req_cachep = kmem_cache_create("fuse_request",
2189                                             sizeof(struct fuse_req),
2190                                             0, 0, NULL);
2191         if (!fuse_req_cachep)
2192                 goto out;
2193
2194         err = misc_register(&fuse_miscdevice);
2195         if (err)
2196                 goto out_cache_clean;
2197
2198         return 0;
2199
2200  out_cache_clean:
2201         kmem_cache_destroy(fuse_req_cachep);
2202  out:
2203         return err;
2204 }
2205
2206 void fuse_dev_cleanup(void)
2207 {
2208         misc_deregister(&fuse_miscdevice);
2209         kmem_cache_destroy(fuse_req_cachep);
2210 }