/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
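/* Slab cache from which all fuse requests are allocated */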
static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
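/* Reset a request to its freshly allocated state: empty lists, one reference */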
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
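/* Block every signal except SIGKILL while waiting for a reply */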
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
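/* Record the calling task's credentials in the request header */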
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}
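/*
 * Allocate a request and fill in the caller's context.  Waits while
 * the connection is blocked; returns an ERR_PTR on a signal, a dead
 * connection or allocation failure.
 */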
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
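/* Total size of an argument array, in bytes */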
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
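/* Hand out the next unique request ID */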
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
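/* Move queued background requests to the pending list, up to the
   active background limit */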
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < FUSE_MAX_BACKGROUND &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
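/* Queue an INTERRUPT for a request already read by userspace and
   notify the device reader */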
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
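/* Send a request and wait for the answer (or for the request to be
   aborted) */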
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
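/* Queue a background request; fc->lock must be held and fc->connected
   must have been checked by the caller */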
static void request_send_nowait_locked(struct fuse_conn *fc,
				       struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
		set_bdi_congested(&fc->bdi, READ);
		set_bdi_congested(&fc->bdi, WRITE);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait_locked(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
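/* Bookkeeping for copying between a request and the userspace iovec,
   one mapped page at a time */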
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
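/* True if there is something for the device reader to pick up */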
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
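/*
 * Illustrative sketch (not part of this file): on the other side of
 * /dev/fuse, the daemon's read loop is roughly the following, assuming
 * fd is the opened device descriptor:
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *	// one complete request: in->len bytes, identified by in->unique,
 *	// with the operation selected by in->opcode
 */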
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
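/* Copy the reply arguments into the request, validating the total
   size against what the reply header promised */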
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
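/*
 * Illustrative sketch (not part of this file): the daemon answers by
 * writing a fuse_out_header carrying the request's unique value back
 * to the device, followed by the opcode-specific reply body:
 *
 *	struct fuse_out_header out;
 *	out.unique = in->unique;
 *	out.error = 0;
 *	out.len = sizeof(out) + bodylen;
 *	// write(fd, ...) must supply exactly out.len bytes, or the
 *	// reply is rejected with -EINVAL (see the checks on oh above)
 */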
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
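/* The device was released: no more reads or writes will come, so fail
   every queued request */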
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}