fs/fuse/dev.c
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
19 #include <linux/pipe_fs_i.h>
20 #include <linux/swap.h>
21 #include <linux/splice.h>
22
23 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
24 MODULE_ALIAS("devname:fuse");
25
26 static struct kmem_cache *fuse_req_cachep;
27
28 static struct fuse_conn *fuse_get_conn(struct file *file)
29 {
30         /*
31          * Lockless access is OK, because file->private_data is set
32          * once during mount and is valid until the file is released.
33          */
34         return file->private_data;
35 }
36
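/* Reset a request to a pristine state, wiring up the given page array. */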
37 static void fuse_request_init(struct fuse_req *req, struct page **pages,
38                               struct fuse_page_desc *page_descs,
39                               unsigned npages)
40 {
41         memset(req, 0, sizeof(*req));
42         memset(pages, 0, sizeof(*pages) * npages);
43         memset(page_descs, 0, sizeof(*page_descs) * npages);
44         INIT_LIST_HEAD(&req->list);
45         INIT_LIST_HEAD(&req->intr_entry);
46         init_waitqueue_head(&req->waitq);
47         atomic_set(&req->count, 1);
48         req->pages = pages;
49         req->page_descs = page_descs;
50         req->max_pages = npages;
51 }
52
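/*
 * Allocate a request and its page vector.  Small page counts fit in
 * the arrays embedded in the request itself; larger ones are kmalloc'ed.
 */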
53 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
54 {
55         struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
56         if (req) {
57                 struct page **pages;
58                 struct fuse_page_desc *page_descs;
59
60                 if (npages <= FUSE_REQ_INLINE_PAGES) {
61                         pages = req->inline_pages;
62                         page_descs = req->inline_page_descs;
63                 } else {
64                         pages = kmalloc(sizeof(struct page *) * npages, flags);
65                         page_descs = kmalloc(sizeof(struct fuse_page_desc) *
66                                              npages, flags);
67                 }
68
69                 if (!pages || !page_descs) {
70                         kfree(pages);
71                         kfree(page_descs);
72                         kmem_cache_free(fuse_req_cachep, req);
73                         return NULL;
74                 }
75
76                 fuse_request_init(req, pages, page_descs, npages);
77         }
78         return req;
79 }
80
81 struct fuse_req *fuse_request_alloc(unsigned npages)
82 {
83         return __fuse_request_alloc(npages, GFP_KERNEL);
84 }
85 EXPORT_SYMBOL_GPL(fuse_request_alloc);
86
87 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
88 {
89         return __fuse_request_alloc(npages, GFP_NOFS);
90 }
91
92 void fuse_request_free(struct fuse_req *req)
93 {
94         if (req->pages != req->inline_pages) {
95                 kfree(req->pages);
96                 kfree(req->page_descs);
97         }
98         kmem_cache_free(fuse_req_cachep, req);
99 }
100
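/* Block all signals except SIGKILL, saving the old mask for restore_sigs(). */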
101 static void block_sigs(sigset_t *oldset)
102 {
103         sigset_t mask;
104
105         siginitsetinv(&mask, sigmask(SIGKILL));
106         sigprocmask(SIG_BLOCK, &mask, oldset);
107 }
108
109 static void restore_sigs(sigset_t *oldset)
110 {
111         sigprocmask(SIG_SETMASK, oldset, NULL);
112 }
113
114 void __fuse_get_request(struct fuse_req *req)
115 {
116         atomic_inc(&req->count);
117 }
118
119 /* Must be called with > 1 refcount */
120 static void __fuse_put_request(struct fuse_req *req)
121 {
122         BUG_ON(atomic_read(&req->count) < 2);
123         atomic_dec(&req->count);
124 }
125
126 static void fuse_req_init_context(struct fuse_req *req)
127 {
128         req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
129         req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
130         req->in.h.pid = current->pid;
131 }
132
133 void fuse_set_initialized(struct fuse_conn *fc)
134 {
135         /* Make sure stores before this are seen on another CPU */
136         smp_wmb();
137         fc->initialized = 1;
138 }
139
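/*
 * Request allocation must wait until the connection is initialized;
 * background requests must additionally wait while fc->blocked is set.
 */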
140 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
141 {
142         return !fc->initialized || (for_background && fc->blocked);
143 }
144
145 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
146                                        bool for_background)
147 {
148         struct fuse_req *req;
149         int err;
150         atomic_inc(&fc->num_waiting);
151
152         if (fuse_block_alloc(fc, for_background)) {
153                 sigset_t oldset;
154                 int intr;
155
156                 block_sigs(&oldset);
157                 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
158                                 !fuse_block_alloc(fc, for_background));
159                 restore_sigs(&oldset);
160                 err = -EINTR;
161                 if (intr)
162                         goto out;
163         }
164         /* Matches smp_wmb() in fuse_set_initialized() */
165         smp_rmb();
166
167         err = -ENOTCONN;
168         if (!fc->connected)
169                 goto out;
170
171         req = fuse_request_alloc(npages);
172         err = -ENOMEM;
173         if (!req) {
174                 if (for_background)
175                         wake_up(&fc->blocked_waitq);
176                 goto out;
177         }
178
179         fuse_req_init_context(req);
180         req->waiting = 1;
181         req->background = for_background;
182         return req;
183
184  out:
185         atomic_dec(&fc->num_waiting);
186         return ERR_PTR(err);
187 }
188
189 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
190 {
191         return __fuse_get_req(fc, npages, false);
192 }
193 EXPORT_SYMBOL_GPL(fuse_get_req);
194
195 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
196                                              unsigned npages)
197 {
198         return __fuse_get_req(fc, npages, true);
199 }
200 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
201
202 /*
203  * Return the request in fuse_file->reserved_req.  However, that may
204  * currently be in use; if that is the case, wait for it to become
205  * available.
206  */
207 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
208                                          struct file *file)
209 {
210         struct fuse_req *req = NULL;
211         struct fuse_file *ff = file->private_data;
212
213         do {
214                 wait_event(fc->reserved_req_waitq, ff->reserved_req);
215                 spin_lock(&fc->lock);
216                 if (ff->reserved_req) {
217                         req = ff->reserved_req;
218                         ff->reserved_req = NULL;
219                         req->stolen_file = get_file(file);
220                 }
221                 spin_unlock(&fc->lock);
222         } while (!req);
223
224         return req;
225 }
226
227 /*
228  * Put stolen request back into fuse_file->reserved_req
229  */
230 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
231 {
232         struct file *file = req->stolen_file;
233         struct fuse_file *ff = file->private_data;
234
235         spin_lock(&fc->lock);
236         fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
237         BUG_ON(ff->reserved_req);
238         ff->reserved_req = req;
239         wake_up_all(&fc->reserved_req_waitq);
240         spin_unlock(&fc->lock);
241         fput(file);
242 }
243
244 /*
245  * Gets a request for a file operation; always succeeds
246  *
247  * This is used for sending the FLUSH request, which must get to
248  * userspace, due to POSIX locks which may need to be unlocked.
249  *
250  * If allocation fails due to OOM, use the reserved request in
251  * fuse_file.
252  *
253  * This is very unlikely to deadlock accidentally, since the
254  * filesystem should not have its own file open.  If deadlock is
255  * intentional, it can still be broken by "aborting" the filesystem.
256  */
257 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
258                                              struct file *file)
259 {
260         struct fuse_req *req;
261
262         atomic_inc(&fc->num_waiting);
263         wait_event(fc->blocked_waitq, fc->initialized);
264         /* Matches smp_wmb() in fuse_set_initialized() */
265         smp_rmb();
266         req = fuse_request_alloc(0);
267         if (!req)
268                 req = get_reserved_req(fc, file);
269
270         fuse_req_init_context(req);
271         req->waiting = 1;
272         req->background = 0;
273         return req;
274 }
275
276 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
277 {
278         if (atomic_dec_and_test(&req->count)) {
279                 if (unlikely(req->background)) {
280                         /*
281                          * We get here in the unlikely case that a background
282                          * request was allocated but not sent
283                          */
284                         spin_lock(&fc->lock);
285                         if (!fc->blocked)
286                                 wake_up(&fc->blocked_waitq);
287                         spin_unlock(&fc->lock);
288                 }
289
290                 if (req->waiting)
291                         atomic_dec(&fc->num_waiting);
292
293                 if (req->stolen_file)
294                         put_reserved_req(fc, req);
295                 else
296                         fuse_request_free(req);
297         }
298 }
299 EXPORT_SYMBOL_GPL(fuse_put_request);
300
301 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
302 {
303         unsigned nbytes = 0;
304         unsigned i;
305
306         for (i = 0; i < numargs; i++)
307                 nbytes += args[i].size;
308
309         return nbytes;
310 }
311
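/* Hand out the next unique request ID; zero is reserved, so skip it on wrap. */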
312 static u64 fuse_get_unique(struct fuse_conn *fc)
313 {
314         fc->reqctr++;
315         /* zero is special */
316         if (fc->reqctr == 0)
317                 fc->reqctr = 1;
318
319         return fc->reqctr;
320 }
321
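/*
 * Fill in the total length, put the request on the pending list and
 * wake up readers of /dev/fuse.  Called with fc->lock held.
 */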
322 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
323 {
324         req->in.h.len = sizeof(struct fuse_in_header) +
325                 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
326         list_add_tail(&req->list, &fc->pending);
327         req->state = FUSE_REQ_PENDING;
328         if (!req->waiting) {
329                 req->waiting = 1;
330                 atomic_inc(&fc->num_waiting);
331         }
332         wake_up(&fc->waitq);
333         kill_fasync(&fc->fasync, SIGIO, POLL_IN);
334 }
335
336 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
337                        u64 nodeid, u64 nlookup)
338 {
339         forget->forget_one.nodeid = nodeid;
340         forget->forget_one.nlookup = nlookup;
341
342         spin_lock(&fc->lock);
343         if (fc->connected) {
344                 fc->forget_list_tail->next = forget;
345                 fc->forget_list_tail = forget;
346                 wake_up(&fc->waitq);
347                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
348         } else {
349                 kfree(forget);
350         }
351         spin_unlock(&fc->lock);
352 }
353
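/*
 * Move requests from the background queue to the pending list until
 * max_background of them are active.  Called with fc->lock held.
 */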
354 static void flush_bg_queue(struct fuse_conn *fc)
355 {
356         while (fc->active_background < fc->max_background &&
357                !list_empty(&fc->bg_queue)) {
358                 struct fuse_req *req;
359
360                 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
361                 list_del(&req->list);
362                 fc->active_background++;
363                 req->in.h.unique = fuse_get_unique(fc);
364                 queue_request(fc, req);
365         }
366 }
367
368 /*
369  * This function is called when a request is finished.  Either a reply
370  * has arrived, or it was aborted (and not yet sent), or some error
371  * occurred during communication with userspace, or the device file
372  * was closed.  The requester thread is woken up (if still waiting),
373  * the 'end' callback is called if given, and the reference to the
374  * request is released.
375  *
376  * Called with fc->lock held, releases it
377  */
378 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
379 __releases(fc->lock)
380 {
381         void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
382         req->end = NULL;
383         list_del(&req->list);
384         list_del(&req->intr_entry);
385         req->state = FUSE_REQ_FINISHED;
386         if (req->background) {
387                 req->background = 0;
388
389                 if (fc->num_background == fc->max_background)
390                         fc->blocked = 0;
391
392                 /* Wake up next waiter, if any */
393                 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
394                         wake_up(&fc->blocked_waitq);
395
396                 if (fc->num_background == fc->congestion_threshold &&
397                     fc->connected && fc->bdi_initialized) {
398                         clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
399                         clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
400                 }
401                 fc->num_background--;
402                 fc->active_background--;
403                 flush_bg_queue(fc);
404         }
405         spin_unlock(&fc->lock);
406         wake_up(&req->waitq);
407         if (end)
408                 end(fc, req);
409         fuse_put_request(fc, req);
410 }
411
412 static void wait_answer_interruptible(struct fuse_conn *fc,
413                                       struct fuse_req *req)
414 __releases(fc->lock)
415 __acquires(fc->lock)
416 {
417         if (signal_pending(current))
418                 return;
419
420         spin_unlock(&fc->lock);
421         wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
422         spin_lock(&fc->lock);
423 }
424
425 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
426 {
427         list_add_tail(&req->intr_entry, &fc->interrupts);
428         wake_up(&fc->waitq);
429         kill_fasync(&fc->fasync, SIGIO, POLL_IN);
430 }
431
432 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
433 __releases(fc->lock)
434 __acquires(fc->lock)
435 {
436         if (!fc->no_interrupt) {
437                 /* Any signal may interrupt this */
438                 wait_answer_interruptible(fc, req);
439
440                 if (req->aborted)
441                         goto aborted;
442                 if (req->state == FUSE_REQ_FINISHED)
443                         return;
444
445                 req->interrupted = 1;
446                 if (req->state == FUSE_REQ_SENT)
447                         queue_interrupt(fc, req);
448         }
449
450         if (!req->force) {
451                 sigset_t oldset;
452
453                 /* Only fatal signals may interrupt this */
454                 block_sigs(&oldset);
455                 wait_answer_interruptible(fc, req);
456                 restore_sigs(&oldset);
457
458                 if (req->aborted)
459                         goto aborted;
460                 if (req->state == FUSE_REQ_FINISHED)
461                         return;
462
463                 /* Request is not yet in userspace, bail out */
464                 if (req->state == FUSE_REQ_PENDING) {
465                         list_del(&req->list);
466                         __fuse_put_request(req);
467                         req->out.h.error = -EINTR;
468                         return;
469                 }
470         }
471
472         /*
473          * Either request is already in userspace, or it was forced.
474          * Wait it out.
475          */
476         spin_unlock(&fc->lock);
477         wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
478         spin_lock(&fc->lock);
479
480         if (!req->aborted)
481                 return;
482
483  aborted:
484         BUG_ON(req->state != FUSE_REQ_FINISHED);
485         if (req->locked) {
486                 /* This is uninterruptible sleep, because data is
487                    being copied to/from the buffers of req.  During
488                    locked state, there mustn't be any filesystem
489                    operation (e.g. page fault), since that could lead
490                    to deadlock */
491                 spin_unlock(&fc->lock);
492                 wait_event(req->waitq, !req->locked);
493                 spin_lock(&fc->lock);
494         }
495 }
496
497 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
498 {
499         BUG_ON(req->background);
500         spin_lock(&fc->lock);
501         if (!fc->connected)
502                 req->out.h.error = -ENOTCONN;
503         else if (fc->conn_error)
504                 req->out.h.error = -ECONNREFUSED;
505         else {
506                 req->in.h.unique = fuse_get_unique(fc);
507                 queue_request(fc, req);
508                 /* acquire extra reference, since request is still needed
509                    after request_end() */
510                 __fuse_get_request(req);
511
512                 request_wait_answer(fc, req);
513         }
514         spin_unlock(&fc->lock);
515 }
516
517 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
518 {
519         req->isreply = 1;
520         __fuse_request_send(fc, req);
521 }
522 EXPORT_SYMBOL_GPL(fuse_request_send);
523
524 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
525 {
526         if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
527                 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
528
529         if (fc->minor < 9) {
530                 switch (args->in.h.opcode) {
531                 case FUSE_LOOKUP:
532                 case FUSE_CREATE:
533                 case FUSE_MKNOD:
534                 case FUSE_MKDIR:
535                 case FUSE_SYMLINK:
536                 case FUSE_LINK:
537                         args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
538                         break;
539                 case FUSE_GETATTR:
540                 case FUSE_SETATTR:
541                         args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
542                         break;
543                 }
544         }
545         if (fc->minor < 12) {
546                 switch (args->in.h.opcode) {
547                 case FUSE_CREATE:
548                         args->in.args[0].size = sizeof(struct fuse_open_in);
549                         break;
550                 case FUSE_MKNOD:
551                         args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
552                         break;
553                 }
554         }
555 }
556
557 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
558 {
559         struct fuse_req *req;
560         ssize_t ret;
561
562         req = fuse_get_req(fc, 0);
563         if (IS_ERR(req))
564                 return PTR_ERR(req);
565
566         /* Needs to be done after fuse_get_req() so that fc->minor is valid */
567         fuse_adjust_compat(fc, args);
568
569         req->in.h.opcode = args->in.h.opcode;
570         req->in.h.nodeid = args->in.h.nodeid;
571         req->in.numargs = args->in.numargs;
572         memcpy(req->in.args, args->in.args,
573                args->in.numargs * sizeof(struct fuse_in_arg));
574         req->out.argvar = args->out.argvar;
575         req->out.numargs = args->out.numargs;
576         memcpy(req->out.args, args->out.args,
577                args->out.numargs * sizeof(struct fuse_arg));
578         fuse_request_send(fc, req);
579         ret = req->out.h.error;
580         if (!ret && args->out.argvar) {
581                 BUG_ON(args->out.numargs != 1);
582                 ret = req->out.args[0].size;
583         }
584         fuse_put_request(fc, req);
585
586         return ret;
587 }
588
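/*
 * Account a background request, marking the bdi congested when the
 * congestion threshold is reached, then kick the background queue.
 * Called with fc->lock held.
 */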
589 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
590                                             struct fuse_req *req)
591 {
592         BUG_ON(!req->background);
593         fc->num_background++;
594         if (fc->num_background == fc->max_background)
595                 fc->blocked = 1;
596         if (fc->num_background == fc->congestion_threshold &&
597             fc->bdi_initialized) {
598                 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
599                 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
600         }
601         list_add_tail(&req->list, &fc->bg_queue);
602         flush_bg_queue(fc);
603 }
604
605 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
606 {
607         spin_lock(&fc->lock);
608         if (fc->connected) {
609                 fuse_request_send_nowait_locked(fc, req);
610                 spin_unlock(&fc->lock);
611         } else {
612                 req->out.h.error = -ENOTCONN;
613                 request_end(fc, req);
614         }
615 }
616
617 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
618 {
619         req->isreply = 1;
620         fuse_request_send_nowait(fc, req);
621 }
622 EXPORT_SYMBOL_GPL(fuse_request_send_background);
623
624 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
625                                           struct fuse_req *req, u64 unique)
626 {
627         int err = -ENODEV;
628
629         req->isreply = 0;
630         req->in.h.unique = unique;
631         spin_lock(&fc->lock);
632         if (fc->connected) {
633                 queue_request(fc, req);
634                 err = 0;
635         }
636         spin_unlock(&fc->lock);
637
638         return err;
639 }
640
641 /*
642  * Called under fc->lock
643  *
644  * fc->connected must have been checked previously
645  */
646 void fuse_request_send_background_locked(struct fuse_conn *fc,
647                                          struct fuse_req *req)
648 {
649         req->isreply = 1;
650         fuse_request_send_nowait_locked(fc, req);
651 }
652
653 void fuse_force_forget(struct file *file, u64 nodeid)
654 {
655         struct inode *inode = file_inode(file);
656         struct fuse_conn *fc = get_fuse_conn(inode);
657         struct fuse_req *req;
658         struct fuse_forget_in inarg;
659
660         memset(&inarg, 0, sizeof(inarg));
661         inarg.nlookup = 1;
662         req = fuse_get_req_nofail_nopages(fc, file);
663         req->in.h.opcode = FUSE_FORGET;
664         req->in.h.nodeid = nodeid;
665         req->in.numargs = 1;
666         req->in.args[0].size = sizeof(inarg);
667         req->in.args[0].value = &inarg;
668         req->isreply = 0;
669         __fuse_request_send(fc, req);
670         /* ignore errors */
671         fuse_put_request(fc, req);
672 }
673
674 /*
675  * Lock the request.  Up to the next unlock_request() there mustn't be
676  * anything that could cause a page-fault.  If the request has already
677  * been aborted, bail out.
678  */
679 static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
680 {
681         int err = 0;
682         if (req) {
683                 spin_lock(&fc->lock);
684                 if (req->aborted)
685                         err = -ENOENT;
686                 else
687                         req->locked = 1;
688                 spin_unlock(&fc->lock);
689         }
690         return err;
691 }
692
693 /*
694  * Unlock request.  If it was aborted while it was locked, the
695  * requester thread is currently waiting for it to be unlocked, so
696  * wake it up.
697  */
698 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
699 {
700         if (req) {
701                 spin_lock(&fc->lock);
702                 req->locked = 0;
703                 if (req->aborted)
704                         wake_up(&req->waitq);
705                 spin_unlock(&fc->lock);
706         }
707 }
708
709 struct fuse_copy_state {
710         struct fuse_conn *fc;
711         int write;
712         struct fuse_req *req;
713         const struct iovec *iov;
714         struct pipe_buffer *pipebufs;
715         struct pipe_buffer *currbuf;
716         struct pipe_inode_info *pipe;
717         unsigned long nr_segs;
718         unsigned long seglen;
719         unsigned long addr;
720         struct page *pg;
721         unsigned len;
722         unsigned offset;
723         unsigned move_pages:1;
724 };
725
726 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
727                            int write,
728                            const struct iovec *iov, unsigned long nr_segs)
729 {
730         memset(cs, 0, sizeof(*cs));
731         cs->fc = fc;
732         cs->write = write;
733         cs->iov = iov;
734         cs->nr_segs = nr_segs;
735 }
736
737 /* Unmap and put previous page of userspace buffer */
738 static void fuse_copy_finish(struct fuse_copy_state *cs)
739 {
740         if (cs->currbuf) {
741                 struct pipe_buffer *buf = cs->currbuf;
742
743                 if (cs->write)
744                         buf->len = PAGE_SIZE - cs->len;
745                 cs->currbuf = NULL;
746         } else if (cs->pg) {
747                 if (cs->write) {
748                         flush_dcache_page(cs->pg);
749                         set_page_dirty_lock(cs->pg);
750                 }
751                 put_page(cs->pg);
752         }
753         cs->pg = NULL;
754 }
755
756 /*
756  * Get another pageful of userspace buffer, map it into kernel
757  * address space, and lock the request
759  */
760 static int fuse_copy_fill(struct fuse_copy_state *cs)
761 {
762         struct page *page;
763         int err;
764
765         unlock_request(cs->fc, cs->req);
766         fuse_copy_finish(cs);
767         if (cs->pipebufs) {
768                 struct pipe_buffer *buf = cs->pipebufs;
769
770                 if (!cs->write) {
771                         err = buf->ops->confirm(cs->pipe, buf);
772                         if (err)
773                                 return err;
774
775                         BUG_ON(!cs->nr_segs);
776                         cs->currbuf = buf;
777                         cs->pg = buf->page;
778                         cs->offset = buf->offset;
779                         cs->len = buf->len;
780                         cs->pipebufs++;
781                         cs->nr_segs--;
782                 } else {
783                         if (cs->nr_segs == cs->pipe->buffers)
784                                 return -EIO;
785
786                         page = alloc_page(GFP_HIGHUSER);
787                         if (!page)
788                                 return -ENOMEM;
789
790                         buf->page = page;
791                         buf->offset = 0;
792                         buf->len = 0;
793
794                         cs->currbuf = buf;
795                         cs->pg = page;
796                         cs->offset = 0;
797                         cs->len = PAGE_SIZE;
798                         cs->pipebufs++;
799                         cs->nr_segs++;
800                 }
801         } else {
802                 if (!cs->seglen) {
803                         BUG_ON(!cs->nr_segs);
804                         cs->seglen = cs->iov[0].iov_len;
805                         cs->addr = (unsigned long) cs->iov[0].iov_base;
806                         cs->iov++;
807                         cs->nr_segs--;
808                 }
809                 err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
810                 if (err < 0)
811                         return err;
812                 BUG_ON(err != 1);
813                 cs->pg = page;
814                 cs->offset = cs->addr % PAGE_SIZE;
815                 cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
816                 cs->seglen -= cs->len;
817                 cs->addr += cs->len;
818         }
819
820         return lock_request(cs->fc, cs->req);
821 }
822
823 /* Do as much copy to/from userspace buffer as we can */
824 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
825 {
826         unsigned ncpy = min(*size, cs->len);
827         if (val) {
828                 void *pgaddr = kmap_atomic(cs->pg);
829                 void *buf = pgaddr + cs->offset;
830
831                 if (cs->write)
832                         memcpy(buf, *val, ncpy);
833                 else
834                         memcpy(*val, buf, ncpy);
835
836                 kunmap_atomic(pgaddr);
837                 *val += ncpy;
838         }
839         *size -= ncpy;
840         cs->len -= ncpy;
841         cs->offset += ncpy;
842         return ncpy;
843 }
844
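/*
 * A page may only be stolen into the page cache if no one else holds
 * a reference to it and it carries no unexpected flags.
 */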
845 static int fuse_check_page(struct page *page)
846 {
847         if (page_mapcount(page) ||
848             page->mapping != NULL ||
849             page_count(page) != 1 ||
850             (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
851              ~(1 << PG_locked |
852                1 << PG_referenced |
853                1 << PG_uptodate |
854                1 << PG_lru |
855                1 << PG_active |
856                1 << PG_reclaim))) {
857                 printk(KERN_WARNING "fuse: trying to steal weird page\n");
858                 printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
859                 return 1;
860         }
861         return 0;
862 }
863
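/*
 * Try to steal the pipe buffer's page into the request instead of
 * copying it (the splice "move" optimization).  Returns 0 on success,
 * 1 if the caller should fall back to copying, or a negative error.
 */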
864 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
865 {
866         int err;
867         struct page *oldpage = *pagep;
868         struct page *newpage;
869         struct pipe_buffer *buf = cs->pipebufs;
870
871         unlock_request(cs->fc, cs->req);
872         fuse_copy_finish(cs);
873
874         err = buf->ops->confirm(cs->pipe, buf);
875         if (err)
876                 return err;
877
878         BUG_ON(!cs->nr_segs);
879         cs->currbuf = buf;
880         cs->len = buf->len;
881         cs->pipebufs++;
882         cs->nr_segs--;
883
884         if (cs->len != PAGE_SIZE)
885                 goto out_fallback;
886
887         if (buf->ops->steal(cs->pipe, buf) != 0)
888                 goto out_fallback;
889
890         newpage = buf->page;
891
892         if (!PageUptodate(newpage))
893                 SetPageUptodate(newpage);
894
895         ClearPageMappedToDisk(newpage);
896
897         if (fuse_check_page(newpage) != 0)
898                 goto out_fallback_unlock;
899
900         /*
901          * This is a new and locked page; it shouldn't be mapped or
902          * have any special flags on it
903          */
904         if (WARN_ON(page_mapped(oldpage)))
905                 goto out_fallback_unlock;
906         if (WARN_ON(page_has_private(oldpage)))
907                 goto out_fallback_unlock;
908         if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
909                 goto out_fallback_unlock;
910         if (WARN_ON(PageMlocked(oldpage)))
911                 goto out_fallback_unlock;
912
913         err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
914         if (err) {
915                 unlock_page(newpage);
916                 return err;
917         }
918
919         page_cache_get(newpage);
920
921         if (!(buf->flags & PIPE_BUF_FLAG_LRU))
922                 lru_cache_add_file(newpage);
923
924         err = 0;
925         spin_lock(&cs->fc->lock);
926         if (cs->req->aborted)
927                 err = -ENOENT;
928         else
929                 *pagep = newpage;
930         spin_unlock(&cs->fc->lock);
931
932         if (err) {
933                 unlock_page(newpage);
934                 page_cache_release(newpage);
935                 return err;
936         }
937
938         unlock_page(oldpage);
939         page_cache_release(oldpage);
940         cs->len = 0;
941
942         return 0;
943
944 out_fallback_unlock:
945         unlock_page(newpage);
946 out_fallback:
947         cs->pg = buf->page;
948         cs->offset = buf->offset;
949
950         err = lock_request(cs->fc, cs->req);
951         if (err)
952                 return err;
953
954         return 1;
955 }
956
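/*
 * Reference the request page directly from a pipe buffer instead of
 * copying it: the zero-copy path for splice reads of /dev/fuse.
 */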
957 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
958                          unsigned offset, unsigned count)
959 {
960         struct pipe_buffer *buf;
961
962         if (cs->nr_segs == cs->pipe->buffers)
963                 return -EIO;
964
965         unlock_request(cs->fc, cs->req);
966         fuse_copy_finish(cs);
967
968         buf = cs->pipebufs;
969         page_cache_get(page);
970         buf->page = page;
971         buf->offset = offset;
972         buf->len = count;
973
974         cs->pipebufs++;
975         cs->nr_segs++;
976         cs->len = 0;
977
978         return 0;
979 }
980
981 /*
982  * Copy a page in the request to/from the userspace buffer.  Must be
983  * done atomically
984  */
985 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
986                           unsigned offset, unsigned count, int zeroing)
987 {
988         int err;
989         struct page *page = *pagep;
990
991         if (page && zeroing && count < PAGE_SIZE)
992                 clear_highpage(page);
993
994         while (count) {
995                 if (cs->write && cs->pipebufs && page) {
996                         return fuse_ref_page(cs, page, offset, count);
997                 } else if (!cs->len) {
998                         if (cs->move_pages && page &&
999                             offset == 0 && count == PAGE_SIZE) {
1000                                 err = fuse_try_move_page(cs, pagep);
1001                                 if (err <= 0)
1002                                         return err;
1003                         } else {
1004                                 err = fuse_copy_fill(cs);
1005                                 if (err)
1006                                         return err;
1007                         }
1008                 }
1009                 if (page) {
1010                         void *mapaddr = kmap_atomic(page);
1011                         void *buf = mapaddr + offset;
1012                         offset += fuse_copy_do(cs, &buf, &count);
1013                         kunmap_atomic(mapaddr);
1014                 } else
1015                         offset += fuse_copy_do(cs, NULL, &count);
1016         }
1017         if (page && !cs->write)
1018                 flush_dcache_page(page);
1019         return 0;
1020 }
1021
1022 /* Copy pages in the request to/from userspace buffer */
1023 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1024                            int zeroing)
1025 {
1026         unsigned i;
1027         struct fuse_req *req = cs->req;
1028
1029         for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
1030                 int err;
1031                 unsigned offset = req->page_descs[i].offset;
1032                 unsigned count = min(nbytes, req->page_descs[i].length);
1033
1034                 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1035                                      zeroing);
1036                 if (err)
1037                         return err;
1038
1039                 nbytes -= count;
1040         }
1041         return 0;
1042 }
1043
1044 /* Copy a single argument in the request to/from userspace buffer */
1045 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1046 {
1047         while (size) {
1048                 if (!cs->len) {
1049                         int err = fuse_copy_fill(cs);
1050                         if (err)
1051                                 return err;
1052                 }
1053                 fuse_copy_do(cs, &val, &size);
1054         }
1055         return 0;
1056 }
1057
1058 /* Copy request arguments to/from userspace buffer */
1059 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1060                           unsigned argpages, struct fuse_arg *args,
1061                           int zeroing)
1062 {
1063         int err = 0;
1064         unsigned i;
1065
1066         for (i = 0; !err && i < numargs; i++)  {
1067                 struct fuse_arg *arg = &args[i];
1068                 if (i == numargs - 1 && argpages)
1069                         err = fuse_copy_pages(cs, arg->size, zeroing);
1070                 else
1071                         err = fuse_copy_one(cs, arg->value, arg->size);
1072         }
1073         return err;
1074 }
1075
1076 static int forget_pending(struct fuse_conn *fc)
1077 {
1078         return fc->forget_list_head.next != NULL;
1079 }
1080
1081 static int request_pending(struct fuse_conn *fc)
1082 {
1083         return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
1084                 forget_pending(fc);
1085 }
1086
1087 /* Wait until a request is available on the pending list */
1088 static void request_wait(struct fuse_conn *fc)
1089 __releases(fc->lock)
1090 __acquires(fc->lock)
1091 {
1092         DECLARE_WAITQUEUE(wait, current);
1093
1094         add_wait_queue_exclusive(&fc->waitq, &wait);
1095         while (fc->connected && !request_pending(fc)) {
1096                 set_current_state(TASK_INTERRUPTIBLE);
1097                 if (signal_pending(current))
1098                         break;
1099
1100                 spin_unlock(&fc->lock);
1101                 schedule();
1102                 spin_lock(&fc->lock);
1103         }
1104         set_current_state(TASK_RUNNING);
1105         remove_wait_queue(&fc->waitq, &wait);
1106 }
1107
1108 /*
1109  * Transfer an interrupt request to userspace
1110  *
1111  * Unlike other requests this is assembled on demand, without a need
1112  * to allocate a separate fuse_req structure.
1113  *
1114  * Called with fc->lock held, releases it
1115  */
1116 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
1117                                size_t nbytes, struct fuse_req *req)
1118 __releases(fc->lock)
1119 {
1120         struct fuse_in_header ih;
1121         struct fuse_interrupt_in arg;
1122         unsigned reqsize = sizeof(ih) + sizeof(arg);
1123         int err;
1124
1125         list_del_init(&req->intr_entry);
1126         req->intr_unique = fuse_get_unique(fc);
1127         memset(&ih, 0, sizeof(ih));
1128         memset(&arg, 0, sizeof(arg));
1129         ih.len = reqsize;
1130         ih.opcode = FUSE_INTERRUPT;
1131         ih.unique = req->intr_unique;
1132         arg.unique = req->in.h.unique;
1133
1134         spin_unlock(&fc->lock);
1135         if (nbytes < reqsize)
1136                 return -EINVAL;
1137
1138         err = fuse_copy_one(cs, &ih, sizeof(ih));
1139         if (!err)
1140                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1141         fuse_copy_finish(cs);
1142
1143         return err ? err : reqsize;
1144 }
1145
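/*
 * Detach up to 'max' entries from the forget list and return them,
 * storing the number dequeued in *countp if it is not NULL.
 * Called with fc->lock held.
 */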
1146 static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
1147                                                unsigned max,
1148                                                unsigned *countp)
1149 {
1150         struct fuse_forget_link *head = fc->forget_list_head.next;
1151         struct fuse_forget_link **newhead = &head;
1152         unsigned count;
1153
1154         for (count = 0; *newhead != NULL && count < max; count++)
1155                 newhead = &(*newhead)->next;
1156
1157         fc->forget_list_head.next = *newhead;
1158         *newhead = NULL;
1159         if (fc->forget_list_head.next == NULL)
1160                 fc->forget_list_tail = &fc->forget_list_head;
1161
1162         if (countp != NULL)
1163                 *countp = count;
1164
1165         return head;
1166 }
1167
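/* Encode a single FORGET message into the read buffer.  Releases fc->lock. */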
1168 static int fuse_read_single_forget(struct fuse_conn *fc,
1169                                    struct fuse_copy_state *cs,
1170                                    size_t nbytes)
1171 __releases(fc->lock)
1172 {
1173         int err;
1174         struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
1175         struct fuse_forget_in arg = {
1176                 .nlookup = forget->forget_one.nlookup,
1177         };
1178         struct fuse_in_header ih = {
1179                 .opcode = FUSE_FORGET,
1180                 .nodeid = forget->forget_one.nodeid,
1181                 .unique = fuse_get_unique(fc),
1182                 .len = sizeof(ih) + sizeof(arg),
1183         };
1184
1185         spin_unlock(&fc->lock);
1186         kfree(forget);
1187         if (nbytes < ih.len)
1188                 return -EINVAL;
1189
1190         err = fuse_copy_one(cs, &ih, sizeof(ih));
1191         if (!err)
1192                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1193         fuse_copy_finish(cs);
1194
1195         if (err)
1196                 return err;
1197
1198         return ih.len;
1199 }
1200
1201 static int fuse_read_batch_forget(struct fuse_conn *fc,
1202                                    struct fuse_copy_state *cs, size_t nbytes)
1203 __releases(fc->lock)
1204 {
1205         int err;
1206         unsigned max_forgets;
1207         unsigned count;
1208         struct fuse_forget_link *head;
1209         struct fuse_batch_forget_in arg = { .count = 0 };
1210         struct fuse_in_header ih = {
1211                 .opcode = FUSE_BATCH_FORGET,
1212                 .unique = fuse_get_unique(fc),
1213                 .len = sizeof(ih) + sizeof(arg),
1214         };
1215
1216         if (nbytes < ih.len) {
1217                 spin_unlock(&fc->lock);
1218                 return -EINVAL;
1219         }
1220
1221         max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1222         head = dequeue_forget(fc, max_forgets, &count);
1223         spin_unlock(&fc->lock);
1224
1225         arg.count = count;
1226         ih.len += count * sizeof(struct fuse_forget_one);
1227         err = fuse_copy_one(cs, &ih, sizeof(ih));
1228         if (!err)
1229                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1230
1231         while (head) {
1232                 struct fuse_forget_link *forget = head;
1233
1234                 if (!err) {
1235                         err = fuse_copy_one(cs, &forget->forget_one,
1236                                             sizeof(forget->forget_one));
1237                 }
1238                 head = forget->next;
1239                 kfree(forget);
1240         }
1241
1242         fuse_copy_finish(cs);
1243
1244         if (err)
1245                 return err;
1246
1247         return ih.len;
1248 }
1249
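/*
 * Use batched forgets when the protocol (minor >= 16) allows it and
 * more than one forget is queued.
 */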
1250 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1251                             size_t nbytes)
1252 __releases(fc->lock)
1253 {
1254         if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1255                 return fuse_read_single_forget(fc, cs, nbytes);
1256         else
1257                 return fuse_read_batch_forget(fc, cs, nbytes);
1258 }
1259
1260 /*
1261  * Read a single request into the userspace filesystem's buffer.  This
1262  * function waits until a request is available, then removes it from
1263  * the pending list and copies request data to userspace buffer.  If
1264  * no reply is needed (FORGET), or the request has been aborted, or there
1265  * was an error during the copying, then it is finished by calling
1266  * request_end().  Otherwise add it to the processing list, and set
1267  * the 'sent' flag.
1268  */
1269 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1270                                 struct fuse_copy_state *cs, size_t nbytes)
1271 {
1272         int err;
1273         struct fuse_req *req;
1274         struct fuse_in *in;
1275         unsigned reqsize;
1276
1277  restart:
1278         spin_lock(&fc->lock);
1279         err = -EAGAIN;
1280         if ((file->f_flags & O_NONBLOCK) && fc->connected &&
1281             !request_pending(fc))
1282                 goto err_unlock;
1283
1284         request_wait(fc);
1285         err = -ENODEV;
1286         if (!fc->connected)
1287                 goto err_unlock;
1288         err = -ERESTARTSYS;
1289         if (!request_pending(fc))
1290                 goto err_unlock;
1291
1292         if (!list_empty(&fc->interrupts)) {
1293                 req = list_entry(fc->interrupts.next, struct fuse_req,
1294                                  intr_entry);
1295                 return fuse_read_interrupt(fc, cs, nbytes, req);
1296         }
1297
1298         if (forget_pending(fc)) {
1299                 if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
1300                         return fuse_read_forget(fc, cs, nbytes);
1301
1302                 if (fc->forget_batch <= -8)
1303                         fc->forget_batch = 16;
1304         }
1305
1306         req = list_entry(fc->pending.next, struct fuse_req, list);
1307         req->state = FUSE_REQ_READING;
1308         list_move(&req->list, &fc->io);
1309
1310         in = &req->in;
1311         reqsize = in->h.len;
1312         /* If request is too large, reply with an error and restart the read */
1313         if (nbytes < reqsize) {
1314                 req->out.h.error = -EIO;
1315                 /* SETXATTR is special, since it may carry arbitrarily large data */
1316                 if (in->h.opcode == FUSE_SETXATTR)
1317                         req->out.h.error = -E2BIG;
1318                 request_end(fc, req);
1319                 goto restart;
1320         }
1321         spin_unlock(&fc->lock);
1322         cs->req = req;
1323         err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1324         if (!err)
1325                 err = fuse_copy_args(cs, in->numargs, in->argpages,
1326                                      (struct fuse_arg *) in->args, 0);
1327         fuse_copy_finish(cs);
1328         spin_lock(&fc->lock);
1329         req->locked = 0;
1330         if (req->aborted) {
1331                 request_end(fc, req);
1332                 return -ENODEV;
1333         }
1334         if (err) {
1335                 req->out.h.error = -EIO;
1336                 request_end(fc, req);
1337                 return err;
1338         }
1339         if (!req->isreply)
1340                 request_end(fc, req);
1341         else {
1342                 req->state = FUSE_REQ_SENT;
1343                 list_move_tail(&req->list, &fc->processing);
1344                 if (req->interrupted)
1345                         queue_interrupt(fc, req);
1346                 spin_unlock(&fc->lock);
1347         }
1348         return reqsize;
1349
1350  err_unlock:
1351         spin_unlock(&fc->lock);
1352         return err;
1353 }
1354
1355 static int fuse_dev_open(struct inode *inode, struct file *file)
1356 {
1357         /*
1358          * A fuse device file's private_data holds the fuse_conn
1359          * once a filesystem is mounted on it, and is also used to
1360          * keep track of whether the file has been mounted already.
1361          */
1362         file->private_data = NULL;
1363         return 0;
1364 }
1365
1366 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1367                               unsigned long nr_segs, loff_t pos)
1368 {
1369         struct fuse_copy_state cs;
1370         struct file *file = iocb->ki_filp;
1371         struct fuse_conn *fc = fuse_get_conn(file);
1372         if (!fc)
1373                 return -EPERM;
1374
1375         fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1376
1377         return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1378 }
1379
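/*
 * Splice-read path: the request is read into caller-provided pipe
 * buffers so that page payloads can be handed over without copying.
 */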
1380 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1381                                     struct pipe_inode_info *pipe,
1382                                     size_t len, unsigned int flags)
1383 {
1384         int ret;
1385         int page_nr = 0;
1386         int do_wakeup = 0;
1387         struct pipe_buffer *bufs;
1388         struct fuse_copy_state cs;
1389         struct fuse_conn *fc = fuse_get_conn(in);
1390         if (!fc)
1391                 return -EPERM;
1392
1393         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1394         if (!bufs)
1395                 return -ENOMEM;
1396
1397         fuse_copy_init(&cs, fc, 1, NULL, 0);
1398         cs.pipebufs = bufs;
1399         cs.pipe = pipe;
1400         ret = fuse_dev_do_read(fc, in, &cs, len);
1401         if (ret < 0)
1402                 goto out;
1403
1404         ret = 0;
1405         pipe_lock(pipe);
1406
1407         if (!pipe->readers) {
1408                 send_sig(SIGPIPE, current, 0);
1409                 if (!ret)
1410                         ret = -EPIPE;
1411                 goto out_unlock;
1412         }
1413
1414         if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1415                 ret = -EIO;
1416                 goto out_unlock;
1417         }
1418
1419         while (page_nr < cs.nr_segs) {
1420                 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1421                 struct pipe_buffer *buf = pipe->bufs + newbuf;
1422
1423                 buf->page = bufs[page_nr].page;
1424                 buf->offset = bufs[page_nr].offset;
1425                 buf->len = bufs[page_nr].len;
1426                 /*
1427                  * Need to be careful about this.  Having buf->ops in module
1428                  * code can Oops if the buffer persists after module unload.
1429                  */
1430                 buf->ops = &nosteal_pipe_buf_ops;
1431
1432                 pipe->nrbufs++;
1433                 page_nr++;
1434                 ret += buf->len;
1435
1436                 if (pipe->files)
1437                         do_wakeup = 1;
1438         }
1439
1440 out_unlock:
1441         pipe_unlock(pipe);
1442
1443         if (do_wakeup) {
1444                 smp_mb();
1445                 if (waitqueue_active(&pipe->wait))
1446                         wake_up_interruptible(&pipe->wait);
1447                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1448         }
1449
1450 out:
1451         for (; page_nr < cs.nr_segs; page_nr++)
1452                 page_cache_release(bufs[page_nr].page);
1453
1454         kfree(bufs);
1455         return ret;
1456 }
1457
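/*
 * Notification handlers below: decode a payload written by the userspace
 * filesystem and apply the matching reverse operation on the kernel side.
 */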
1458 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1459                             struct fuse_copy_state *cs)
1460 {
1461         struct fuse_notify_poll_wakeup_out outarg;
1462         int err = -EINVAL;
1463
1464         if (size != sizeof(outarg))
1465                 goto err;
1466
1467         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1468         if (err)
1469                 goto err;
1470
1471         fuse_copy_finish(cs);
1472         return fuse_notify_poll_wakeup(fc, &outarg);
1473
1474 err:
1475         fuse_copy_finish(cs);
1476         return err;
1477 }
1478
1479 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1480                                    struct fuse_copy_state *cs)
1481 {
1482         struct fuse_notify_inval_inode_out outarg;
1483         int err = -EINVAL;
1484
1485         if (size != sizeof(outarg))
1486                 goto err;
1487
1488         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1489         if (err)
1490                 goto err;
1491         fuse_copy_finish(cs);
1492
1493         down_read(&fc->killsb);
1494         err = -ENOENT;
1495         if (fc->sb) {
1496                 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1497                                                outarg.off, outarg.len);
1498         }
1499         up_read(&fc->killsb);
1500         return err;
1501
1502 err:
1503         fuse_copy_finish(cs);
1504         return err;
1505 }
1506
1507 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1508                                    struct fuse_copy_state *cs)
1509 {
1510         struct fuse_notify_inval_entry_out outarg;
1511         int err = -ENOMEM;
1512         char *buf;
1513         struct qstr name;
1514
1515         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1516         if (!buf)
1517                 goto err;
1518
1519         err = -EINVAL;
1520         if (size < sizeof(outarg))
1521                 goto err;
1522
1523         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1524         if (err)
1525                 goto err;
1526
1527         err = -ENAMETOOLONG;
1528         if (outarg.namelen > FUSE_NAME_MAX)
1529                 goto err;
1530
1531         err = -EINVAL;
1532         if (size != sizeof(outarg) + outarg.namelen + 1)
1533                 goto err;
1534
1535         name.name = buf;
1536         name.len = outarg.namelen;
1537         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1538         if (err)
1539                 goto err;
1540         fuse_copy_finish(cs);
1541         buf[outarg.namelen] = 0;
1542         name.hash = full_name_hash(name.name, name.len);
1543
1544         down_read(&fc->killsb);
1545         err = -ENOENT;
1546         if (fc->sb)
1547                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1548         up_read(&fc->killsb);
1549         kfree(buf);
1550         return err;
1551
1552 err:
1553         kfree(buf);
1554         fuse_copy_finish(cs);
1555         return err;
1556 }
1557
1558 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1559                               struct fuse_copy_state *cs)
1560 {
1561         struct fuse_notify_delete_out outarg;
1562         int err = -ENOMEM;
1563         char *buf;
1564         struct qstr name;
1565
1566         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1567         if (!buf)
1568                 goto err;
1569
1570         err = -EINVAL;
1571         if (size < sizeof(outarg))
1572                 goto err;
1573
1574         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1575         if (err)
1576                 goto err;
1577
1578         err = -ENAMETOOLONG;
1579         if (outarg.namelen > FUSE_NAME_MAX)
1580                 goto err;
1581
1582         err = -EINVAL;
1583         if (size != sizeof(outarg) + outarg.namelen + 1)
1584                 goto err;
1585
1586         name.name = buf;
1587         name.len = outarg.namelen;
1588         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1589         if (err)
1590                 goto err;
1591         fuse_copy_finish(cs);
1592         buf[outarg.namelen] = 0;
1593         name.hash = full_name_hash(name.name, name.len);
1594
1595         down_read(&fc->killsb);
1596         err = -ENOENT;
1597         if (fc->sb)
1598                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1599                                                outarg.child, &name);
1600         up_read(&fc->killsb);
1601         kfree(buf);
1602         return err;
1603
1604 err:
1605         kfree(buf);
1606         fuse_copy_finish(cs);
1607         return err;
1608 }
1609
1610 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1611                              struct fuse_copy_state *cs)
1612 {
1613         struct fuse_notify_store_out outarg;
1614         struct inode *inode;
1615         struct address_space *mapping;
1616         u64 nodeid;
1617         int err;
1618         pgoff_t index;
1619         unsigned int offset;
1620         unsigned int num;
1621         loff_t file_size;
1622         loff_t end;
1623
1624         err = -EINVAL;
1625         if (size < sizeof(outarg))
1626                 goto out_finish;
1627
1628         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1629         if (err)
1630                 goto out_finish;
1631
1632         err = -EINVAL;
1633         if (size - sizeof(outarg) != outarg.size)
1634                 goto out_finish;
1635
1636         nodeid = outarg.nodeid;
1637
1638         down_read(&fc->killsb);
1639
1640         err = -ENOENT;
1641         if (!fc->sb)
1642                 goto out_up_killsb;
1643
1644         inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1645         if (!inode)
1646                 goto out_up_killsb;
1647
1648         mapping = inode->i_mapping;
1649         index = outarg.offset >> PAGE_CACHE_SHIFT;
1650         offset = outarg.offset & ~PAGE_CACHE_MASK;
1651         file_size = i_size_read(inode);
1652         end = outarg.offset + outarg.size;
1653         if (end > file_size) {
1654                 file_size = end;
1655                 fuse_write_update_size(inode, file_size);
1656         }
1657
1658         num = outarg.size;
1659         while (num) {
1660                 struct page *page;
1661                 unsigned int this_num;
1662
1663                 err = -ENOMEM;
1664                 page = find_or_create_page(mapping, index,
1665                                            mapping_gfp_mask(mapping));
1666                 if (!page)
1667                         goto out_iput;
1668
1669                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1670                 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1671                 if (!err && offset == 0 &&
1672                     (this_num == PAGE_CACHE_SIZE || file_size == end))
1673                         SetPageUptodate(page);
1674                 unlock_page(page);
1675                 page_cache_release(page);
1676
1677                 if (err)
1678                         goto out_iput;
1679
1680                 num -= this_num;
1681                 offset = 0;
1682                 index++;
1683         }
1684
1685         err = 0;
1686
1687 out_iput:
1688         iput(inode);
1689 out_up_killsb:
1690         up_read(&fc->killsb);
1691 out_finish:
1692         fuse_copy_finish(cs);
1693         return err;
1694 }
1695
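/* Called when the NOTIFY_REPLY completes: drop the page references taken below */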
1696 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1697 {
1698         release_pages(req->pages, req->num_pages, false);
1699 }
1700
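/*
 * Gather up to FUSE_MAX_PAGES_PER_REQ pages of the requested range
 * from the inode's page cache (stopping early at the first hole) and
 * send them back to the daemon in a FUSE_NOTIFY_REPLY request.
 */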
1701 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1702                          struct fuse_notify_retrieve_out *outarg)
1703 {
1704         int err;
1705         struct address_space *mapping = inode->i_mapping;
1706         struct fuse_req *req;
1707         pgoff_t index;
1708         loff_t file_size;
1709         unsigned int num;
1710         unsigned int offset;
1711         size_t total_len = 0;
1712         int num_pages;
1713
1714         offset = outarg->offset & ~PAGE_CACHE_MASK;
1715         file_size = i_size_read(inode);
1716
1717         num = outarg->size;
1718         if (outarg->offset > file_size)
1719                 num = 0;
1720         else if (outarg->offset + num > file_size)
1721                 num = file_size - outarg->offset;
1722
1723         num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1724         num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1725
1726         req = fuse_get_req(fc, num_pages);
1727         if (IS_ERR(req))
1728                 return PTR_ERR(req);
1729
1730         req->in.h.opcode = FUSE_NOTIFY_REPLY;
1731         req->in.h.nodeid = outarg->nodeid;
1732         req->in.numargs = 2;
1733         req->in.argpages = 1;
1734         req->page_descs[0].offset = offset;
1735         req->end = fuse_retrieve_end;
1736
1737         index = outarg->offset >> PAGE_CACHE_SHIFT;
1738
1739         while (num && req->num_pages < num_pages) {
1740                 struct page *page;
1741                 unsigned int this_num;
1742
1743                 page = find_get_page(mapping, index);
1744                 if (!page)
1745                         break;
1746
1747                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1748                 req->pages[req->num_pages] = page;
1749                 req->page_descs[req->num_pages].length = this_num;
1750                 req->num_pages++;
1751
1752                 offset = 0;
1753                 num -= this_num;
1754                 total_len += this_num;
1755                 index++;
1756         }
1757         req->misc.retrieve_in.offset = outarg->offset;
1758         req->misc.retrieve_in.size = total_len;
1759         req->in.args[0].size = sizeof(req->misc.retrieve_in);
1760         req->in.args[0].value = &req->misc.retrieve_in;
1761         req->in.args[1].size = total_len;
1762
1763         err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1764         if (err)
1765                 fuse_retrieve_end(fc, req);
1766
1767         return err;
1768 }
1769
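/*
 * FUSE_NOTIFY_RETRIEVE: the daemon asks for cached file data back;
 * look up the inode under fc->killsb and let fuse_retrieve() answer.
 */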
1770 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1771                                 struct fuse_copy_state *cs)
1772 {
1773         struct fuse_notify_retrieve_out outarg;
1774         struct inode *inode;
1775         int err;
1776
1777         err = -EINVAL;
1778         if (size != sizeof(outarg))
1779                 goto copy_finish;
1780
1781         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1782         if (err)
1783                 goto copy_finish;
1784
1785         fuse_copy_finish(cs);
1786
1787         down_read(&fc->killsb);
1788         err = -ENOENT;
1789         if (fc->sb) {
1790                 u64 nodeid = outarg.nodeid;
1791
1792                 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1793                 if (inode) {
1794                         err = fuse_retrieve(fc, inode, &outarg);
1795                         iput(inode);
1796                 }
1797         }
1798         up_read(&fc->killsb);
1799
1800         return err;
1801
1802 copy_finish:
1803         fuse_copy_finish(cs);
1804         return err;
1805 }
1806
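/* Dispatch an unsolicited notification to its handler by notification code */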
1807 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1808                        unsigned int size, struct fuse_copy_state *cs)
1809 {
1810         /* Don't try to move pages (yet) */
1811         cs->move_pages = 0;
1812
1813         switch (code) {
1814         case FUSE_NOTIFY_POLL:
1815                 return fuse_notify_poll(fc, size, cs);
1816
1817         case FUSE_NOTIFY_INVAL_INODE:
1818                 return fuse_notify_inval_inode(fc, size, cs);
1819
1820         case FUSE_NOTIFY_INVAL_ENTRY:
1821                 return fuse_notify_inval_entry(fc, size, cs);
1822
1823         case FUSE_NOTIFY_STORE:
1824                 return fuse_notify_store(fc, size, cs);
1825
1826         case FUSE_NOTIFY_RETRIEVE:
1827                 return fuse_notify_retrieve(fc, size, cs);
1828
1829         case FUSE_NOTIFY_DELETE:
1830                 return fuse_notify_delete(fc, size, cs);
1831
1832         default:
1833                 fuse_copy_finish(cs);
1834                 return -EINVAL;
1835         }
1836 }
1837
1838 /* Look up a request on the processing list by its request or interrupt unique ID */
1839 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1840 {
1841         struct fuse_req *req;
1842
1843         list_for_each_entry(req, &fc->processing, list) {
1844                 if (req->in.h.unique == unique || req->intr_unique == unique)
1845                         return req;
1846         }
1847         return NULL;
1848 }
1849
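/*
 * Copy the reply arguments from the write buffer into the request.  An
 * error reply must carry no arguments.  A reply shorter than expected
 * is accepted only when the last argument is variable length
 * (out->argvar); it is then truncated to the number of bytes written.
 */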
1850 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1851                          unsigned nbytes)
1852 {
1853         unsigned reqsize = sizeof(struct fuse_out_header);
1854
1855         if (out->h.error)
1856                 return nbytes != reqsize ? -EINVAL : 0;
1857
1858         reqsize += len_args(out->numargs, out->args);
1859
1860         if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1861                 return -EINVAL;
1862         else if (reqsize > nbytes) {
1863                 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1864                 unsigned diffsize = reqsize - nbytes;
1865                 if (diffsize > lastarg->size)
1866                         return -EINVAL;
1867                 lastarg->size -= diffsize;
1868         }
1869         return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1870                               out->page_zeroing);
1871 }
1872
1873 /*
1874  * Write a single reply to a request.  First the header is copied from
1875  * the write buffer.  The processing list is then searched for a
1876  * request matching the unique ID found in the header.  If found, the
1877  * request is removed from the list and the rest of the buffer is
1878  * copied into it.  The request is finished by calling request_end().
1879  */
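/*
 * A minimal success reply, as a userspace daemon might write it
 * (illustrative values only; 42 stands in for the request's unique ID
 * and fuse_fd for the daemon's /dev/fuse descriptor):
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh),
 *		.error  = 0,
 *		.unique = 42,
 *	};
 *	write(fuse_fd, &oh, sizeof(oh));
 */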
1880 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1881                                  struct fuse_copy_state *cs, size_t nbytes)
1882 {
1883         int err;
1884         struct fuse_req *req;
1885         struct fuse_out_header oh;
1886
1887         if (nbytes < sizeof(struct fuse_out_header))
1888                 return -EINVAL;
1889
1890         err = fuse_copy_one(cs, &oh, sizeof(oh));
1891         if (err)
1892                 goto err_finish;
1893
1894         err = -EINVAL;
1895         if (oh.len != nbytes)
1896                 goto err_finish;
1897
1898         /*
1899          * A zero oh.unique indicates an unsolicited notification message;
1900          * in that case oh.error carries the notification code instead.
1901          */
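	/*
	 * Illustrative example: a FUSE_NOTIFY_POLL message arrives with
	 * oh.unique == 0 and oh.error == FUSE_NOTIFY_POLL, followed by a
	 * struct fuse_notify_poll_wakeup_out payload.
	 */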
1902         if (!oh.unique) {
1903                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1904                 return err ? err : nbytes;
1905         }
1906
1907         err = -EINVAL;
1908         if (oh.error <= -1000 || oh.error > 0)
1909                 goto err_finish;
1910
1911         spin_lock(&fc->lock);
1912         err = -ENOENT;
1913         if (!fc->connected)
1914                 goto err_unlock;
1915
1916         req = request_find(fc, oh.unique);
1917         if (!req)
1918                 goto err_unlock;
1919
1920         if (req->aborted) {
1921                 spin_unlock(&fc->lock);
1922                 fuse_copy_finish(cs);
1923                 spin_lock(&fc->lock);
1924                 request_end(fc, req);
1925                 return -ENOENT;
1926         }
1927         /* Is it an interrupt reply? */
1928         if (req->intr_unique == oh.unique) {
1929                 err = -EINVAL;
1930                 if (nbytes != sizeof(struct fuse_out_header))
1931                         goto err_unlock;
1932
1933                 if (oh.error == -ENOSYS)
1934                         fc->no_interrupt = 1;
1935                 else if (oh.error == -EAGAIN)
1936                         queue_interrupt(fc, req);
1937
1938                 spin_unlock(&fc->lock);
1939                 fuse_copy_finish(cs);
1940                 return nbytes;
1941         }
1942
1943         req->state = FUSE_REQ_WRITING;
1944         list_move(&req->list, &fc->io);
1945         req->out.h = oh;
1946         req->locked = 1;
1947         cs->req = req;
1948         if (!req->out.page_replace)
1949                 cs->move_pages = 0;
1950         spin_unlock(&fc->lock);
1951
1952         err = copy_out_args(cs, &req->out, nbytes);
1953         fuse_copy_finish(cs);
1954
1955         spin_lock(&fc->lock);
1956         req->locked = 0;
1957         if (!err) {
1958                 if (req->aborted)
1959                         err = -ENOENT;
1960         } else if (!req->aborted)
1961                 req->out.h.error = -EIO;
1962         request_end(fc, req);
1963
1964         return err ? err : nbytes;
1965
1966  err_unlock:
1967         spin_unlock(&fc->lock);
1968  err_finish:
1969         fuse_copy_finish(cs);
1970         return err;
1971 }
1972
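/* write(2) and aio write entry point: wrap the caller's iovec in a copy state */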
1973 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1974                               unsigned long nr_segs, loff_t pos)
1975 {
1976         struct fuse_copy_state cs;
1977         struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1978         if (!fc)
1979                 return -EPERM;
1980
1981         fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1982
1983         return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1984 }
1985
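/*
 * Splice a reply into the device from a pipe.  The needed pipe buffers
 * are detached (or reference-counted, for a partially consumed buffer)
 * under pipe_lock() so the actual copy can run unlocked; with
 * SPLICE_F_MOVE the pages may be stolen instead of copied.
 */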
1986 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1987                                      struct file *out, loff_t *ppos,
1988                                      size_t len, unsigned int flags)
1989 {
1990         unsigned nbuf;
1991         unsigned idx;
1992         struct pipe_buffer *bufs;
1993         struct fuse_copy_state cs;
1994         struct fuse_conn *fc;
1995         size_t rem;
1996         ssize_t ret;
1997
1998         fc = fuse_get_conn(out);
1999         if (!fc)
2000                 return -EPERM;
2001
2002         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
2003         if (!bufs)
2004                 return -ENOMEM;
2005
2006         pipe_lock(pipe);
2007         nbuf = 0;
2008         rem = 0;
2009         for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
2010                 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
2011
2012         ret = -EINVAL;
2013         if (rem < len) {
2014                 pipe_unlock(pipe);
2015                 goto out;
2016         }
2017
2018         rem = len;
2019         while (rem) {
2020                 struct pipe_buffer *ibuf;
2021                 struct pipe_buffer *obuf;
2022
2023                 BUG_ON(nbuf >= pipe->buffers);
2024                 BUG_ON(!pipe->nrbufs);
2025                 ibuf = &pipe->bufs[pipe->curbuf];
2026                 obuf = &bufs[nbuf];
2027
2028                 if (rem >= ibuf->len) {
2029                         *obuf = *ibuf;
2030                         ibuf->ops = NULL;
2031                         pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2032                         pipe->nrbufs--;
2033                 } else {
2034                         ibuf->ops->get(pipe, ibuf);
2035                         *obuf = *ibuf;
2036                         obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2037                         obuf->len = rem;
2038                         ibuf->offset += obuf->len;
2039                         ibuf->len -= obuf->len;
2040                 }
2041                 nbuf++;
2042                 rem -= obuf->len;
2043         }
2044         pipe_unlock(pipe);
2045
2046         fuse_copy_init(&cs, fc, 0, NULL, nbuf);
2047         cs.pipebufs = bufs;
2048         cs.pipe = pipe;
2049
2050         if (flags & SPLICE_F_MOVE)
2051                 cs.move_pages = 1;
2052
2053         ret = fuse_dev_do_write(fc, &cs, len);
2054
2055         for (idx = 0; idx < nbuf; idx++) {
2056                 struct pipe_buffer *buf = &bufs[idx];
2057                 buf->ops->release(pipe, buf);
2058         }
2059 out:
2060         kfree(bufs);
2061         return ret;
2062 }
2063
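/*
 * The device is always writable; it becomes readable when
 * request_pending() reports queued work.  A dead connection polls as
 * POLLERR.
 */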
2064 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2065 {
2066         unsigned mask = POLLOUT | POLLWRNORM;
2067         struct fuse_conn *fc = fuse_get_conn(file);
2068         if (!fc)
2069                 return POLLERR;
2070
2071         poll_wait(file, &fc->waitq, wait);
2072
2073         spin_lock(&fc->lock);
2074         if (!fc->connected)
2075                 mask = POLLERR;
2076         else if (request_pending(fc))
2077                 mask |= POLLIN | POLLRDNORM;
2078         spin_unlock(&fc->lock);
2079
2080         return mask;
2081 }
2082
2083 /*
2084  * Abort all requests on the given list (pending or processing).
2085  *
2086  * This function releases fc->lock (inside request_end()) and reacquires it.
2087  */
2088 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2089 __releases(fc->lock)
2090 __acquires(fc->lock)
2091 {
2092         while (!list_empty(head)) {
2093                 struct fuse_req *req;
2094                 req = list_entry(head->next, struct fuse_req, list);
2095                 req->out.h.error = -ECONNABORTED;
2096                 request_end(fc, req);
2097                 spin_lock(&fc->lock);
2098         }
2099 }
2100
2101 /*
2102  * Abort requests under I/O
2103  *
2104  * Each request is marked aborted and finished, and its waiter is
2105  * woken up.  This makes request_wait_answer() wait until the request
2106  * is unlocked and then return.
2107  *
2108  * If the request is asynchronous, then the end function needs to be
2109  * called after waiting for the request to be unlocked (if it was
2110  * locked).
2111  */
2112 static void end_io_requests(struct fuse_conn *fc)
2113 __releases(fc->lock)
2114 __acquires(fc->lock)
2115 {
2116         while (!list_empty(&fc->io)) {
2117                 struct fuse_req *req =
2118                         list_entry(fc->io.next, struct fuse_req, list);
2119                 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
2120
2121                 req->aborted = 1;
2122                 req->out.h.error = -ECONNABORTED;
2123                 req->state = FUSE_REQ_FINISHED;
2124                 list_del_init(&req->list);
2125                 wake_up(&req->waitq);
2126                 if (end) {
2127                         req->end = NULL;
2128                         __fuse_get_request(req);
2129                         spin_unlock(&fc->lock);
2130                         wait_event(req->waitq, !req->locked);
2131                         end(fc, req);
2132                         fuse_put_request(fc, req);
2133                         spin_lock(&fc->lock);
2134                 }
2135         }
2136 }
2137
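/*
 * Flush the background queue with the limit lifted, abort everything
 * on the pending and processing lists, then discard queued forgets.
 */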
2138 static void end_queued_requests(struct fuse_conn *fc)
2139 __releases(fc->lock)
2140 __acquires(fc->lock)
2141 {
2142         fc->max_background = UINT_MAX;
2143         flush_bg_queue(fc);
2144         end_requests(fc, &fc->pending);
2145         end_requests(fc, &fc->processing);
2146         while (forget_pending(fc))
2147                 kfree(dequeue_forget(fc, 1, NULL));
2148 }
2149
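/* Wake all poll waiters so they can notice that the connection died */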
2150 static void end_polls(struct fuse_conn *fc)
2151 {
2152         struct rb_node *p;
2153
2154         p = rb_first(&fc->polled_files);
2155
2156         while (p) {
2157                 struct fuse_file *ff;
2158                 ff = rb_entry(p, struct fuse_file, polled_node);
2159                 wake_up_interruptible_all(&ff->poll_wait);
2160
2161                 p = rb_next(p);
2162         }
2163 }
2164
2165 /*
2166  * Abort all requests.
2167  *
2168  * Emergency exit in case of a malicious or accidental deadlock, or
2169  * just a hung filesystem.
2170  *
2171  * The same effect is usually achievable through killing the
2172  * filesystem daemon and all users of the filesystem.  The exception
2173  * is the combination of an asynchronous request and the tricky
2174  * deadlock (see Documentation/filesystems/fuse.txt).
2175  *
2176  * During the aborting, progression of requests from the pending and
2177  * processing lists onto the io list, and progression of new requests
2178  * onto the pending list is prevented by fc->connected being false.
2179  *
2180  * Progression of requests under I/O to the processing list is
2181  * prevented by the req->aborted flag being true for these requests.
2182  * For this reason requests on the io list must be aborted first.
2183  */
2184 void fuse_abort_conn(struct fuse_conn *fc)
2185 {
2186         spin_lock(&fc->lock);
2187         if (fc->connected) {
2188                 fc->connected = 0;
2189                 fc->blocked = 0;
2190                 fuse_set_initialized(fc);
2191                 end_io_requests(fc);
2192                 end_queued_requests(fc);
2193                 end_polls(fc);
2194                 wake_up_all(&fc->waitq);
2195                 wake_up_all(&fc->blocked_waitq);
2196                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
2197         }
2198         spin_unlock(&fc->lock);
2199 }
2200 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2201
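/*
 * The daemon has closed /dev/fuse: mark the connection dead, fail
 * everything still queued and wake anyone blocked on the connection.
 */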
2202 int fuse_dev_release(struct inode *inode, struct file *file)
2203 {
2204         struct fuse_conn *fc = fuse_get_conn(file);
2205         if (fc) {
2206                 spin_lock(&fc->lock);
2207                 fc->connected = 0;
2208                 fc->blocked = 0;
2209                 fuse_set_initialized(fc);
2210                 end_queued_requests(fc);
2211                 end_polls(fc);
2212                 wake_up_all(&fc->blocked_waitq);
2213                 spin_unlock(&fc->lock);
2214                 fuse_conn_put(fc);
2215         }
2216
2217         return 0;
2218 }
2219 EXPORT_SYMBOL_GPL(fuse_dev_release);
2220
2221 static int fuse_dev_fasync(int fd, struct file *file, int on)
2222 {
2223         struct fuse_conn *fc = fuse_get_conn(file);
2224         if (!fc)
2225                 return -EPERM;
2226
2227         /* No locking - fasync_helper does its own locking */
2228         return fasync_helper(fd, file, on, &fc->fasync);
2229 }
2230
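/* plain read(2)/write(2) are routed through the aio methods below */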
2231 const struct file_operations fuse_dev_operations = {
2232         .owner          = THIS_MODULE,
2233         .open           = fuse_dev_open,
2234         .llseek         = no_llseek,
2235         .read           = do_sync_read,
2236         .aio_read       = fuse_dev_read,
2237         .splice_read    = fuse_dev_splice_read,
2238         .write          = do_sync_write,
2239         .aio_write      = fuse_dev_write,
2240         .splice_write   = fuse_dev_splice_write,
2241         .poll           = fuse_dev_poll,
2242         .release        = fuse_dev_release,
2243         .fasync         = fuse_dev_fasync,
2244 };
2245 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2246
2247 static struct miscdevice fuse_miscdevice = {
2248         .minor = FUSE_MINOR,
2249         .name  = "fuse",
2250         .fops = &fuse_dev_operations,
2251 };
2252
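/* Create the request slab cache and register the /dev/fuse misc device */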
2253 int __init fuse_dev_init(void)
2254 {
2255         int err = -ENOMEM;
2256         fuse_req_cachep = kmem_cache_create("fuse_request",
2257                                             sizeof(struct fuse_req),
2258                                             0, 0, NULL);
2259         if (!fuse_req_cachep)
2260                 goto out;
2261
2262         err = misc_register(&fuse_miscdevice);
2263         if (err)
2264                 goto out_cache_clean;
2265
2266         return 0;
2267
2268  out_cache_clean:
2269         kmem_cache_destroy(fuse_req_cachep);
2270  out:
2271         return err;
2272 }
2273
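/* Tear down in the reverse order of fuse_dev_init() */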
2274 void fuse_dev_cleanup(void)
2275 {
2276         misc_deregister(&fuse_miscdevice);
2277         kmem_cache_destroy(fuse_req_cachep);
2278 }