2 * NET4: Implementation of BSD Unix domain sockets.
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid huge amounts
38 * of sockets hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of sockets to 2*max_files and
42 * the number of skbs queueable in the
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lots of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
53 * Known differences from reference BSD that was tested:
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
118 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 static DEFINE_SPINLOCK(unix_table_lock);
120 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
122 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
124 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
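/*
 * Unbound sockets sit on the extra chain at unix_sockets_unbound.  A socket
 * bound to a filesystem path stores UNIX_HASH_SIZE in addr->hash (it is
 * hashed by inode number instead), so any other value marks an abstract
 * address.  Roughly, an abstract name is bound from userspace by leaving
 * sun_path[0] == 0 and sizing the address by the bytes actually used,
 * e.g. (illustrative only):
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */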
126 #ifdef CONFIG_SECURITY_NETWORK
127 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
132 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
134 scm->secid = *UNIXSID(skb);
137 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
140 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
142 #endif /* CONFIG_SECURITY_NETWORK */
145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate spin lock.
150 static inline unsigned unix_hash_fold(__wsum n)
152 unsigned hash = (__force unsigned)n;
155 return hash&(UNIX_HASH_SIZE-1);
158 #define unix_peer(sk) (unix_sk(sk)->peer)
160 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
162 return unix_peer(osk) == sk;
165 static inline int unix_may_send(struct sock *sk, struct sock *osk)
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
170 static inline int unix_recvq_full(struct sock const *sk)
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
175 static struct sock *unix_peer_get(struct sock *s)
183 unix_state_unlock(s);
187 static inline void unix_release_addr(struct unix_address *addr)
189 if (atomic_dec_and_test(&addr->refcnt))
194 * Check unix socket name:
195 * - it should not be zero length.
196 * - if it does not start with a zero byte, it should be NUL terminated (FS object).
197 * - if it starts with a zero byte, it is an abstract name.
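 *
 * Returns the (possibly canonicalized) address length on success or a
 * negative errno; for abstract names *hashp is also set to the folded
 * checksum of the name.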
200 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
206 if (sunaddr->sun_path[0]) {
208 * This may look like an off-by-one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However, in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
223 static void __unix_remove_socket(struct sock *sk)
225 sk_del_node_init(sk);
228 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
234 static inline void unix_remove_socket(struct sock *sk)
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
241 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
248 static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
253 struct hlist_node *node;
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
258 if (!net_eq(sock_net(s), net))
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
270 static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
281 spin_unlock(&unix_table_lock);
285 static struct sock *unix_find_socket_byinode(struct inode *i)
288 struct hlist_node *node;
290 spin_lock(&unix_table_lock);
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
295 if (dentry && dentry->d_inode == i) {
302 spin_unlock(&unix_table_lock);
306 static inline int unix_writable(struct sock *sk)
308 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
311 static void unix_write_space(struct sock *sk)
313 struct socket_wq *wq;
316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync(&wq->wait);
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
325 /* When a dgram socket disconnects (or changes its peer), we clear its receive
326 * queue of packets that arrived from the previous peer. First, this allows
327 * flow control based only on wmem_alloc; second, an sk connected to a peer
328 * may receive messages only from that peer. */
329 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331 if (!skb_queue_empty(&sk->sk_receive_queue)) {
332 skb_queue_purge(&sk->sk_receive_queue);
333 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335 /* If one link of a bidirectional dgram pipe is disconnected,
336 * we signal an error. Messages are lost. Do not do this
337 * when the peer was not connected to us.
339 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
340 other->sk_err = ECONNRESET;
341 other->sk_error_report(other);
346 static void unix_sock_destructor(struct sock *sk)
348 struct unix_sock *u = unix_sk(sk);
350 skb_queue_purge(&sk->sk_receive_queue);
352 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
353 WARN_ON(!sk_unhashed(sk));
354 WARN_ON(sk->sk_socket);
355 if (!sock_flag(sk, SOCK_DEAD)) {
356 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
361 unix_release_addr(u->addr);
363 atomic_dec(&unix_nr_socks);
365 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
367 #ifdef UNIX_REFCNT_DEBUG
368 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
369 atomic_read(&unix_nr_socks));
373 static int unix_release_sock(struct sock *sk, int embrion)
375 struct unix_sock *u = unix_sk(sk);
376 struct dentry *dentry;
377 struct vfsmount *mnt;
382 unix_remove_socket(sk);
387 sk->sk_shutdown = SHUTDOWN_MASK;
392 state = sk->sk_state;
393 sk->sk_state = TCP_CLOSE;
394 unix_state_unlock(sk);
396 wake_up_interruptible_all(&u->peer_wait);
398 skpair = unix_peer(sk);
400 if (skpair != NULL) {
401 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
402 unix_state_lock(skpair);
404 skpair->sk_shutdown = SHUTDOWN_MASK;
405 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
406 skpair->sk_err = ECONNRESET;
407 unix_state_unlock(skpair);
408 skpair->sk_state_change(skpair);
409 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 sock_put(skpair); /* It may now die */
412 unix_peer(sk) = NULL;
415 /* Try to flush out this socket. Throw out buffers at least */
417 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
418 if (state == TCP_LISTEN)
419 unix_release_sock(skb->sk, 1);
420 /* passed fds are erased in the kfree_skb hook */
431 /* ---- Socket is dead now and most probably destroyed ---- */
434 * Fixme: BSD difference: In BSD all sockets connected to us get
435 * ECONNRESET and we die on the spot. In Linux we behave
436 * like files and pipes do and wait for the last
439 * Can't we simply set sock->err?
441 * What does the above comment talk about? --ANK(980817)
444 if (unix_tot_inflight)
445 unix_gc(); /* Garbage collect fds */
450 static void init_peercred(struct sock *sk)
452 put_pid(sk->sk_peer_pid);
453 if (sk->sk_peer_cred)
454 put_cred(sk->sk_peer_cred);
455 sk->sk_peer_pid = get_pid(task_tgid(current));
456 sk->sk_peer_cred = get_current_cred();
459 static void copy_peercred(struct sock *sk, struct sock *peersk)
461 put_pid(sk->sk_peer_pid);
462 if (sk->sk_peer_cred)
463 put_cred(sk->sk_peer_cred);
464 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
465 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
468 static int unix_listen(struct socket *sock, int backlog)
471 struct sock *sk = sock->sk;
472 struct unix_sock *u = unix_sk(sk);
473 struct pid *old_pid = NULL;
474 const struct cred *old_cred = NULL;
477 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
478 goto out; /* Only stream/seqpacket sockets accept */
481 goto out; /* No listens on an unbound socket */
483 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485 if (backlog > sk->sk_max_ack_backlog)
486 wake_up_interruptible_all(&u->peer_wait);
487 sk->sk_max_ack_backlog = backlog;
488 sk->sk_state = TCP_LISTEN;
489 /* set credentials so connect can copy them */
494 unix_state_unlock(sk);
502 static int unix_release(struct socket *);
503 static int unix_bind(struct socket *, struct sockaddr *, int);
504 static int unix_stream_connect(struct socket *, struct sockaddr *,
505 int addr_len, int flags);
506 static int unix_socketpair(struct socket *, struct socket *);
507 static int unix_accept(struct socket *, struct socket *, int);
508 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
509 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
510 static unsigned int unix_dgram_poll(struct file *, struct socket *,
512 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
513 static int unix_shutdown(struct socket *, int);
514 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
515 struct msghdr *, size_t);
516 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
517 struct msghdr *, size_t, int);
518 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
519 struct msghdr *, size_t);
520 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
521 struct msghdr *, size_t, int);
522 static int unix_dgram_connect(struct socket *, struct sockaddr *,
524 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
525 struct msghdr *, size_t);
527 static const struct proto_ops unix_stream_ops = {
529 .owner = THIS_MODULE,
530 .release = unix_release,
532 .connect = unix_stream_connect,
533 .socketpair = unix_socketpair,
534 .accept = unix_accept,
535 .getname = unix_getname,
538 .listen = unix_listen,
539 .shutdown = unix_shutdown,
540 .setsockopt = sock_no_setsockopt,
541 .getsockopt = sock_no_getsockopt,
542 .sendmsg = unix_stream_sendmsg,
543 .recvmsg = unix_stream_recvmsg,
544 .mmap = sock_no_mmap,
545 .sendpage = sock_no_sendpage,
548 static const struct proto_ops unix_dgram_ops = {
550 .owner = THIS_MODULE,
551 .release = unix_release,
553 .connect = unix_dgram_connect,
554 .socketpair = unix_socketpair,
555 .accept = sock_no_accept,
556 .getname = unix_getname,
557 .poll = unix_dgram_poll,
559 .listen = sock_no_listen,
560 .shutdown = unix_shutdown,
561 .setsockopt = sock_no_setsockopt,
562 .getsockopt = sock_no_getsockopt,
563 .sendmsg = unix_dgram_sendmsg,
564 .recvmsg = unix_dgram_recvmsg,
565 .mmap = sock_no_mmap,
566 .sendpage = sock_no_sendpage,
569 static const struct proto_ops unix_seqpacket_ops = {
571 .owner = THIS_MODULE,
572 .release = unix_release,
574 .connect = unix_stream_connect,
575 .socketpair = unix_socketpair,
576 .accept = unix_accept,
577 .getname = unix_getname,
578 .poll = unix_dgram_poll,
580 .listen = unix_listen,
581 .shutdown = unix_shutdown,
582 .setsockopt = sock_no_setsockopt,
583 .getsockopt = sock_no_getsockopt,
584 .sendmsg = unix_seqpacket_sendmsg,
585 .recvmsg = unix_dgram_recvmsg,
586 .mmap = sock_no_mmap,
587 .sendpage = sock_no_sendpage,
590 static struct proto unix_proto = {
592 .owner = THIS_MODULE,
593 .obj_size = sizeof(struct unix_sock),
597 * AF_UNIX sockets do not interact with hardware, hence they
598 * don't trigger interrupts - so it's safe for them to have
599 * bh-unsafe locking for their sk_receive_queue.lock. Split off
600 * this special lock-class by reinitializing the spinlock key:
602 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
604 static struct sock *unix_create1(struct net *net, struct socket *sock)
606 struct sock *sk = NULL;
609 atomic_inc(&unix_nr_socks);
610 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
613 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
617 sock_init_data(sock, sk);
618 lockdep_set_class(&sk->sk_receive_queue.lock,
619 &af_unix_sk_receive_queue_lock_key);
621 sk->sk_write_space = unix_write_space;
622 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
623 sk->sk_destruct = unix_sock_destructor;
627 spin_lock_init(&u->lock);
628 atomic_long_set(&u->inflight, 0);
629 INIT_LIST_HEAD(&u->link);
630 mutex_init(&u->readlock); /* single task reading lock */
631 init_waitqueue_head(&u->peer_wait);
632 unix_insert_socket(unix_sockets_unbound, sk);
635 atomic_dec(&unix_nr_socks);
638 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
644 static int unix_create(struct net *net, struct socket *sock, int protocol,
647 if (protocol && protocol != PF_UNIX)
648 return -EPROTONOSUPPORT;
650 sock->state = SS_UNCONNECTED;
652 switch (sock->type) {
654 sock->ops = &unix_stream_ops;
657 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
661 sock->type = SOCK_DGRAM;
663 sock->ops = &unix_dgram_ops;
666 sock->ops = &unix_seqpacket_ops;
669 return -ESOCKTNOSUPPORT;
672 return unix_create1(net, sock) ? 0 : -ENOMEM;
675 static int unix_release(struct socket *sock)
677 struct sock *sk = sock->sk;
684 return unix_release_sock(sk, 0);
687 static int unix_autobind(struct socket *sock)
689 struct sock *sk = sock->sk;
690 struct net *net = sock_net(sk);
691 struct unix_sock *u = unix_sk(sk);
692 static u32 ordernum = 1;
693 struct unix_address *addr;
695 unsigned int retries = 0;
697 mutex_lock(&u->readlock);
704 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
708 addr->name->sun_family = AF_UNIX;
709 atomic_set(&addr->refcnt, 1);
712 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
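/* The autobound name is abstract: a leading NUL byte followed by five
 * hex digits taken from ordernum, e.g. "\000a1b2c".
 */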
713 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
715 spin_lock(&unix_table_lock);
716 ordernum = (ordernum+1)&0xFFFFF;
718 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
720 spin_unlock(&unix_table_lock);
722 * __unix_find_socket_byname() may take a long time if many names
723 * are already in use.
726 /* Give up if all names seem to be in use. */
727 if (retries++ == 0xFFFFF) {
734 addr->hash ^= sk->sk_type;
736 __unix_remove_socket(sk);
738 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
739 spin_unlock(&unix_table_lock);
742 out: mutex_unlock(&u->readlock);
746 static struct sock *unix_find_other(struct net *net,
747 struct sockaddr_un *sunname, int len,
748 int type, unsigned hash, int *error)
754 if (sunname->sun_path[0]) {
756 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
759 inode = path.dentry->d_inode;
760 err = inode_permission(inode, MAY_WRITE);
765 if (!S_ISSOCK(inode->i_mode))
767 u = unix_find_socket_byinode(inode);
771 if (u->sk_type == type)
772 touch_atime(path.mnt, path.dentry);
777 if (u->sk_type != type) {
783 u = unix_find_socket_byname(net, sunname, len, type, hash);
785 struct dentry *dentry;
786 dentry = unix_sk(u)->dentry;
788 touch_atime(unix_sk(u)->mnt, dentry);
802 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
804 struct sock *sk = sock->sk;
805 struct net *net = sock_net(sk);
806 struct unix_sock *u = unix_sk(sk);
807 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
808 struct dentry *dentry = NULL;
812 struct unix_address *addr;
813 struct hlist_head *list;
816 if (sunaddr->sun_family != AF_UNIX)
819 if (addr_len == sizeof(short)) {
820 err = unix_autobind(sock);
824 err = unix_mkname(sunaddr, addr_len, &hash);
829 mutex_lock(&u->readlock);
836 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
840 memcpy(addr->name, sunaddr, addr_len);
841 addr->len = addr_len;
842 addr->hash = hash ^ sk->sk_type;
843 atomic_set(&addr->refcnt, 1);
845 if (sunaddr->sun_path[0]) {
849 * Get the parent directory, calculate the hash for last
852 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
854 goto out_mknod_parent;
856 dentry = lookup_create(&nd, 0);
857 err = PTR_ERR(dentry);
859 goto out_mknod_unlock;
862 * All right, let's create it.
865 (SOCK_INODE(sock)->i_mode & ~current_umask());
866 err = mnt_want_write(nd.path.mnt);
869 err = security_path_mknod(&nd.path, dentry, mode, 0);
871 goto out_mknod_drop_write;
872 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
873 out_mknod_drop_write:
874 mnt_drop_write(nd.path.mnt);
877 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
878 dput(nd.path.dentry);
879 nd.path.dentry = dentry;
881 addr->hash = UNIX_HASH_SIZE;
884 spin_lock(&unix_table_lock);
886 if (!sunaddr->sun_path[0]) {
888 if (__unix_find_socket_byname(net, sunaddr, addr_len,
889 sk->sk_type, hash)) {
890 unix_release_addr(addr);
894 list = &unix_socket_table[addr->hash];
896 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
897 u->dentry = nd.path.dentry;
898 u->mnt = nd.path.mnt;
902 __unix_remove_socket(sk);
904 __unix_insert_socket(list, sk);
907 spin_unlock(&unix_table_lock);
909 mutex_unlock(&u->readlock);
916 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
921 unix_release_addr(addr);
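/*
 * Take the state locks of two unix sockets in a consistent order (only one
 * lock when they are the same socket or sk2 is NULL), so that two tasks
 * double-locking the same pair cannot deadlock.
 */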
925 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
927 if (unlikely(sk1 == sk2) || !sk2) {
928 unix_state_lock(sk1);
932 unix_state_lock(sk1);
933 unix_state_lock_nested(sk2);
935 unix_state_lock(sk2);
936 unix_state_lock_nested(sk1);
940 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
942 if (unlikely(sk1 == sk2) || !sk2) {
943 unix_state_unlock(sk1);
946 unix_state_unlock(sk1);
947 unix_state_unlock(sk2);
950 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
953 struct sock *sk = sock->sk;
954 struct net *net = sock_net(sk);
955 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
960 if (addr->sa_family != AF_UNSPEC) {
961 err = unix_mkname(sunaddr, alen, &hash);
966 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
967 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
971 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
975 unix_state_double_lock(sk, other);
977 /* Apparently VFS overslept socket death. Retry. */
978 if (sock_flag(other, SOCK_DEAD)) {
979 unix_state_double_unlock(sk, other);
985 if (!unix_may_send(sk, other))
988 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
994 * 1003.1g breaking connected state with AF_UNSPEC
997 unix_state_double_lock(sk, other);
1001 * If it was connected, reconnect.
1003 if (unix_peer(sk)) {
1004 struct sock *old_peer = unix_peer(sk);
1005 unix_peer(sk) = other;
1006 unix_state_double_unlock(sk, other);
1008 if (other != old_peer)
1009 unix_dgram_disconnected(sk, old_peer);
1012 unix_peer(sk) = other;
1013 unix_state_double_unlock(sk, other);
1018 unix_state_double_unlock(sk, other);
1024 static long unix_wait_for_peer(struct sock *other, long timeo)
1026 struct unix_sock *u = unix_sk(other);
1030 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1032 sched = !sock_flag(other, SOCK_DEAD) &&
1033 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1034 unix_recvq_full(other);
1036 unix_state_unlock(other);
1039 timeo = schedule_timeout(timeo);
1041 finish_wait(&u->peer_wait, &wait);
1045 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1046 int addr_len, int flags)
1048 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1049 struct sock *sk = sock->sk;
1050 struct net *net = sock_net(sk);
1051 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1052 struct sock *newsk = NULL;
1053 struct sock *other = NULL;
1054 struct sk_buff *skb = NULL;
1060 err = unix_mkname(sunaddr, addr_len, &hash);
1065 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1066 (err = unix_autobind(sock)) != 0)
1069 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1071 /* First of all allocate resources.
1072 If we do it after the state is locked,
1073 we will have to recheck everything again in any case.
1078 /* create new sock for complete connection */
1079 newsk = unix_create1(sock_net(sk), NULL);
1083 /* Allocate skb for sending to listening sock */
1084 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1089 /* Find listening sock. */
1090 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1094 /* Latch state of peer */
1095 unix_state_lock(other);
1097 /* Apparently VFS overslept socket death. Retry. */
1098 if (sock_flag(other, SOCK_DEAD)) {
1099 unix_state_unlock(other);
1104 err = -ECONNREFUSED;
1105 if (other->sk_state != TCP_LISTEN)
1107 if (other->sk_shutdown & RCV_SHUTDOWN)
1110 if (unix_recvq_full(other)) {
1115 timeo = unix_wait_for_peer(other, timeo);
1117 err = sock_intr_errno(timeo);
1118 if (signal_pending(current))
1126 This is a tricky place. We need to grab the write lock and cannot
1127 drop the lock on the peer. It is dangerous because a deadlock is
1128 possible. The connect-to-self case and simultaneous
1129 attempts to connect are eliminated by checking the socket
1130 state: other is TCP_LISTEN; if sk is TCP_LISTEN, we checked
1131 this before attempting to grab the lock.
1133 Well, and we have to recheck the state after the socket is locked.
1139 /* This is ok... continue with connect */
1141 case TCP_ESTABLISHED:
1142 /* Socket is already connected */
1150 unix_state_lock_nested(sk);
1152 if (sk->sk_state != st) {
1153 unix_state_unlock(sk);
1154 unix_state_unlock(other);
1159 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1161 unix_state_unlock(sk);
1165 /* The way is open! Quickly set all the necessary fields...
1168 unix_peer(newsk) = sk;
1169 newsk->sk_state = TCP_ESTABLISHED;
1170 newsk->sk_type = sk->sk_type;
1171 init_peercred(newsk);
1172 newu = unix_sk(newsk);
1173 newsk->sk_wq = &newu->peer_wq;
1174 otheru = unix_sk(other);
1176 /* copy address information from listening to new sock */
1178 atomic_inc(&otheru->addr->refcnt);
1179 newu->addr = otheru->addr;
1181 if (otheru->dentry) {
1182 newu->dentry = dget(otheru->dentry);
1183 newu->mnt = mntget(otheru->mnt);
1186 /* Set credentials */
1187 copy_peercred(sk, other);
1189 sock->state = SS_CONNECTED;
1190 sk->sk_state = TCP_ESTABLISHED;
1193 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1194 unix_peer(sk) = newsk;
1196 unix_state_unlock(sk);
1198 /* take ten and send info to listening sock */
1199 spin_lock(&other->sk_receive_queue.lock);
1200 __skb_queue_tail(&other->sk_receive_queue, skb);
1201 spin_unlock(&other->sk_receive_queue.lock);
1202 unix_state_unlock(other);
1203 other->sk_data_ready(other, 0);
1209 unix_state_unlock(other);
1214 unix_release_sock(newsk, 0);
1220 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1222 struct sock *ska = socka->sk, *skb = sockb->sk;
1224 /* Join our sockets back to back */
1227 unix_peer(ska) = skb;
1228 unix_peer(skb) = ska;
1232 if (ska->sk_type != SOCK_DGRAM) {
1233 ska->sk_state = TCP_ESTABLISHED;
1234 skb->sk_state = TCP_ESTABLISHED;
1235 socka->state = SS_CONNECTED;
1236 sockb->state = SS_CONNECTED;
1241 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1243 struct sock *sk = sock->sk;
1245 struct sk_buff *skb;
1249 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1253 if (sk->sk_state != TCP_LISTEN)
1256 /* If socket state is TCP_LISTEN it cannot change (for now...),
1257 * so no locks are necessary.
1260 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1262 /* This means receive shutdown. */
1269 skb_free_datagram(sk, skb);
1270 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1272 /* attach accepted sock to socket */
1273 unix_state_lock(tsk);
1274 newsock->state = SS_CONNECTED;
1275 sock_graft(tsk, newsock);
1276 unix_state_unlock(tsk);
1284 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1286 struct sock *sk = sock->sk;
1287 struct unix_sock *u;
1288 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1292 sk = unix_peer_get(sk);
1303 unix_state_lock(sk);
1305 sunaddr->sun_family = AF_UNIX;
1306 sunaddr->sun_path[0] = 0;
1307 *uaddr_len = sizeof(short);
1309 struct unix_address *addr = u->addr;
1311 *uaddr_len = addr->len;
1312 memcpy(sunaddr, addr->name, *uaddr_len);
1314 unix_state_unlock(sk);
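/*
 * Descriptors passed with SCM_RIGHTS travel in UNIXCB(skb).fp.
 * unix_attach_fds() marks them as "in flight" for the garbage collector
 * when the message is queued, and unix_detach_fds() unmarks them when it
 * is received or freed.  A userspace sender does roughly the following
 * (illustrative sketch; fd_to_pass is a hypothetical descriptor):
 *
 *	char data = 'x', buf[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf, .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type  = SCM_RIGHTS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */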
1320 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1324 scm->fp = UNIXCB(skb).fp;
1325 UNIXCB(skb).fp = NULL;
1327 for (i = scm->fp->count-1; i >= 0; i--)
1328 unix_notinflight(scm->fp->fp[i]);
1331 static void unix_destruct_scm(struct sk_buff *skb)
1333 struct scm_cookie scm;
1334 memset(&scm, 0, sizeof(scm));
1335 scm.pid = UNIXCB(skb).pid;
1336 scm.cred = UNIXCB(skb).cred;
1338 unix_detach_fds(&scm, skb);
1340 /* Alas, it calls VFS */
1341 /* So fscking what? fput() had been SMP-safe since the last Summer */
1346 #define MAX_RECURSION_LEVEL 4
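/*
 * Bound on how deeply AF_UNIX sockets may be nested inside one another via
 * SCM_RIGHTS; a send that would exceed it fails with -ETOOMANYREFS (see
 * unix_attach_fds() below).
 */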
1348 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1351 unsigned char max_level = 0;
1352 int unix_sock_count = 0;
1354 for (i = scm->fp->count - 1; i >= 0; i--) {
1355 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1359 max_level = max(max_level,
1360 unix_sk(sk)->recursion_level);
1363 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1364 return -ETOOMANYREFS;
1367 * Need to duplicate file references for the sake of garbage
1368 * collection. Otherwise a socket in the fps might become a
1369 * candidate for GC while the skb is not yet queued.
1371 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1372 if (!UNIXCB(skb).fp)
1375 if (unix_sock_count) {
1376 for (i = scm->fp->count - 1; i >= 0; i--)
1377 unix_inflight(scm->fp->fp[i]);
1382 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1385 UNIXCB(skb).pid = get_pid(scm->pid);
1386 UNIXCB(skb).cred = get_cred(scm->cred);
1387 UNIXCB(skb).fp = NULL;
1388 if (scm->fp && send_fds)
1389 err = unix_attach_fds(scm, skb);
1391 skb->destructor = unix_destruct_scm;
1396 * Send AF_UNIX data.
1399 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1400 struct msghdr *msg, size_t len)
1402 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1403 struct sock *sk = sock->sk;
1404 struct net *net = sock_net(sk);
1405 struct unix_sock *u = unix_sk(sk);
1406 struct sockaddr_un *sunaddr = msg->msg_name;
1407 struct sock *other = NULL;
1408 int namelen = 0; /* fake GCC */
1411 struct sk_buff *skb;
1413 struct scm_cookie tmp_scm;
1416 if (NULL == siocb->scm)
1417 siocb->scm = &tmp_scm;
1419 err = scm_send(sock, msg, siocb->scm);
1424 if (msg->msg_flags&MSG_OOB)
1427 if (msg->msg_namelen) {
1428 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1435 other = unix_peer_get(sk);
1440 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1441 && (err = unix_autobind(sock)) != 0)
1445 if (len > sk->sk_sndbuf - 32)
1448 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1452 err = unix_scm_to_skb(siocb->scm, skb, true);
1455 max_level = err + 1;
1456 unix_get_secdata(siocb->scm, skb);
1458 skb_reset_transport_header(skb);
1459 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1463 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1468 if (sunaddr == NULL)
1471 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1477 unix_state_lock(other);
1479 if (!unix_may_send(sk, other))
1482 if (sock_flag(other, SOCK_DEAD)) {
1484 * Check with 1003.1g - what should
1487 unix_state_unlock(other);
1491 unix_state_lock(sk);
1492 if (unix_peer(sk) == other) {
1493 unix_peer(sk) = NULL;
1494 unix_state_unlock(sk);
1496 unix_dgram_disconnected(sk, other);
1498 err = -ECONNREFUSED;
1500 unix_state_unlock(sk);
1510 if (other->sk_shutdown & RCV_SHUTDOWN)
1513 if (sk->sk_type != SOCK_SEQPACKET) {
1514 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1519 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1525 timeo = unix_wait_for_peer(other, timeo);
1527 err = sock_intr_errno(timeo);
1528 if (signal_pending(current))
1534 skb_queue_tail(&other->sk_receive_queue, skb);
1535 if (max_level > unix_sk(other)->recursion_level)
1536 unix_sk(other)->recursion_level = max_level;
1537 unix_state_unlock(other);
1538 other->sk_data_ready(other, len);
1540 scm_destroy(siocb->scm);
1544 unix_state_unlock(other);
1550 scm_destroy(siocb->scm);
1555 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1556 struct msghdr *msg, size_t len)
1558 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1559 struct sock *sk = sock->sk;
1560 struct sock *other = NULL;
1561 struct sockaddr_un *sunaddr = msg->msg_name;
1563 struct sk_buff *skb;
1565 struct scm_cookie tmp_scm;
1566 bool fds_sent = false;
1569 if (NULL == siocb->scm)
1570 siocb->scm = &tmp_scm;
1572 err = scm_send(sock, msg, siocb->scm);
1577 if (msg->msg_flags&MSG_OOB)
1580 if (msg->msg_namelen) {
1581 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1586 other = unix_peer(sk);
1591 if (sk->sk_shutdown & SEND_SHUTDOWN)
1594 while (sent < len) {
1596 * Optimisation for the fact that under 0.01% of X
1597 * messages typically need breaking up.
1602 /* Keep two messages in the pipe so it schedules better */
1603 if (size > ((sk->sk_sndbuf >> 1) - 64))
1604 size = (sk->sk_sndbuf >> 1) - 64;
1606 if (size > SKB_MAX_ALLOC)
1607 size = SKB_MAX_ALLOC;
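/* Example: with a 64 KiB sk_sndbuf the first clamp limits each chunk to
 * 32 KiB - 64 bytes; the SKB_MAX_ALLOC clamp may shrink it further.
 */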
1613 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1620 * If you pass two values to sock_alloc_send_skb
1621 * it tries to grab the large buffer with GFP_NOFS
1622 * (which can fail easily), and if that fails it grabs the
1623 * fallback-size buffer, which is under a page and will
1626 size = min_t(int, size, skb_tailroom(skb));
1629 /* Only send the fds in the first buffer */
1630 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1635 max_level = err + 1;
1638 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1644 unix_state_lock(other);
1646 if (sock_flag(other, SOCK_DEAD) ||
1647 (other->sk_shutdown & RCV_SHUTDOWN))
1650 skb_queue_tail(&other->sk_receive_queue, skb);
1651 if (max_level > unix_sk(other)->recursion_level)
1652 unix_sk(other)->recursion_level = max_level;
1653 unix_state_unlock(other);
1654 other->sk_data_ready(other, size);
1658 scm_destroy(siocb->scm);
1664 unix_state_unlock(other);
1667 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1668 send_sig(SIGPIPE, current, 0);
1671 scm_destroy(siocb->scm);
1673 return sent ? : err;
1676 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1677 struct msghdr *msg, size_t len)
1680 struct sock *sk = sock->sk;
1682 err = sock_error(sk);
1686 if (sk->sk_state != TCP_ESTABLISHED)
1689 if (msg->msg_namelen)
1690 msg->msg_namelen = 0;
1692 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1695 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1697 struct unix_sock *u = unix_sk(sk);
1699 msg->msg_namelen = 0;
1701 msg->msg_namelen = u->addr->len;
1702 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1706 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1707 struct msghdr *msg, size_t size,
1710 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1711 struct scm_cookie tmp_scm;
1712 struct sock *sk = sock->sk;
1713 struct unix_sock *u = unix_sk(sk);
1714 int noblock = flags & MSG_DONTWAIT;
1715 struct sk_buff *skb;
1722 msg->msg_namelen = 0;
1724 mutex_lock(&u->readlock);
1726 skb = skb_recv_datagram(sk, flags, noblock, &err);
1728 unix_state_lock(sk);
1729 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1730 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1731 (sk->sk_shutdown & RCV_SHUTDOWN))
1733 unix_state_unlock(sk);
1737 wake_up_interruptible_sync(&u->peer_wait);
1740 unix_copy_addr(msg, skb->sk);
1742 if (size > skb->len)
1744 else if (size < skb->len)
1745 msg->msg_flags |= MSG_TRUNC;
1747 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1752 siocb->scm = &tmp_scm;
1753 memset(&tmp_scm, 0, sizeof(tmp_scm));
1755 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1756 unix_set_secdata(siocb->scm, skb);
1758 if (!(flags & MSG_PEEK)) {
1760 unix_detach_fds(siocb->scm, skb);
1762 /* It is questionable: on PEEK we could:
1763 - do not return fds - good, but too simple 8)
1764 - return fds, and do not return them on read (old strategy,
1766 - clone fds (I chose it for now, it is the most universal
1769 POSIX 1003.1g does not actually define this clearly
1770 at all. POSIX 1003.1g doesn't define a lot of things
1775 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1779 scm_recv(sock, msg, siocb->scm, flags);
1782 skb_free_datagram(sk, skb);
1784 mutex_unlock(&u->readlock);
1790 * Sleep until data has arrived. But check for races..
1793 static long unix_stream_data_wait(struct sock *sk, long timeo)
1797 unix_state_lock(sk);
1800 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1802 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1804 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1805 signal_pending(current) ||
1809 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1810 unix_state_unlock(sk);
1811 timeo = schedule_timeout(timeo);
1812 unix_state_lock(sk);
1813 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1816 finish_wait(sk_sleep(sk), &wait);
1817 unix_state_unlock(sk);
1823 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1824 struct msghdr *msg, size_t size,
1827 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1828 struct scm_cookie tmp_scm;
1829 struct sock *sk = sock->sk;
1830 struct unix_sock *u = unix_sk(sk);
1831 struct sockaddr_un *sunaddr = msg->msg_name;
1833 int check_creds = 0;
1839 if (sk->sk_state != TCP_ESTABLISHED)
1846 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1847 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1849 msg->msg_namelen = 0;
1851 /* Lock the socket to prevent queue disordering
1852 * while we sleep in memcpy_tomsg
1856 siocb->scm = &tmp_scm;
1857 memset(&tmp_scm, 0, sizeof(tmp_scm));
1860 mutex_lock(&u->readlock);
1864 struct sk_buff *skb;
1866 unix_state_lock(sk);
1867 skb = skb_dequeue(&sk->sk_receive_queue);
1869 unix_sk(sk)->recursion_level = 0;
1870 if (copied >= target)
1874 * POSIX 1003.1g mandates this order.
1877 err = sock_error(sk);
1880 if (sk->sk_shutdown & RCV_SHUTDOWN)
1883 unix_state_unlock(sk);
1887 mutex_unlock(&u->readlock);
1889 timeo = unix_stream_data_wait(sk, timeo);
1891 if (signal_pending(current)) {
1892 err = sock_intr_errno(timeo);
1895 mutex_lock(&u->readlock);
1898 unix_state_unlock(sk);
1901 unix_state_unlock(sk);
1904 /* Never glue messages from different writers */
1905 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1906 (UNIXCB(skb).cred != siocb->scm->cred)) {
1907 skb_queue_head(&sk->sk_receive_queue, skb);
1911 /* Copy credentials */
1912 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1916 /* Copy address just once */
1918 unix_copy_addr(msg, skb->sk);
1922 chunk = min_t(unsigned int, skb->len, size);
1923 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1924 skb_queue_head(&sk->sk_receive_queue, skb);
1932 /* Mark read part of skb as used */
1933 if (!(flags & MSG_PEEK)) {
1934 skb_pull(skb, chunk);
1937 unix_detach_fds(siocb->scm, skb);
1939 /* put the skb back if we didn't use it up.. */
1941 skb_queue_head(&sk->sk_receive_queue, skb);
1950 /* It is questionable, see note in unix_dgram_recvmsg.
1953 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1955 /* put message back and return */
1956 skb_queue_head(&sk->sk_receive_queue, skb);
1961 mutex_unlock(&u->readlock);
1962 scm_recv(sock, msg, siocb->scm, flags);
1964 return copied ? : err;
1967 static int unix_shutdown(struct socket *sock, int mode)
1969 struct sock *sk = sock->sk;
1972 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
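/* Maps SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto RCV_SHUTDOWN (1),
 * SEND_SHUTDOWN (2) or both.
 */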
1975 unix_state_lock(sk);
1976 sk->sk_shutdown |= mode;
1977 other = unix_peer(sk);
1980 unix_state_unlock(sk);
1981 sk->sk_state_change(sk);
1984 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1988 if (mode&RCV_SHUTDOWN)
1989 peer_mode |= SEND_SHUTDOWN;
1990 if (mode&SEND_SHUTDOWN)
1991 peer_mode |= RCV_SHUTDOWN;
1992 unix_state_lock(other);
1993 other->sk_shutdown |= peer_mode;
1994 unix_state_unlock(other);
1995 other->sk_state_change(other);
1996 if (peer_mode == SHUTDOWN_MASK)
1997 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1998 else if (peer_mode & RCV_SHUTDOWN)
1999 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2007 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2009 struct sock *sk = sock->sk;
2015 amount = sk_wmem_alloc_get(sk);
2016 err = put_user(amount, (int __user *)arg);
2020 struct sk_buff *skb;
2022 if (sk->sk_state == TCP_LISTEN) {
2027 spin_lock(&sk->sk_receive_queue.lock);
2028 if (sk->sk_type == SOCK_STREAM ||
2029 sk->sk_type == SOCK_SEQPACKET) {
2030 skb_queue_walk(&sk->sk_receive_queue, skb)
2033 skb = skb_peek(&sk->sk_receive_queue);
2037 spin_unlock(&sk->sk_receive_queue.lock);
2038 err = put_user(amount, (int __user *)arg);
2049 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2051 struct sock *sk = sock->sk;
2054 sock_poll_wait(file, sk_sleep(sk), wait);
2057 /* exceptional events? */
2060 if (sk->sk_shutdown == SHUTDOWN_MASK)
2062 if (sk->sk_shutdown & RCV_SHUTDOWN)
2066 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2067 (sk->sk_shutdown & RCV_SHUTDOWN))
2068 mask |= POLLIN | POLLRDNORM;
2070 /* Connection-based sockets need to check for termination and startup */
2071 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2072 sk->sk_state == TCP_CLOSE)
2076 * we set writable also when the other side has shut down the
2077 * connection. This prevents stuck sockets.
2079 if (unix_writable(sk))
2080 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2085 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2088 struct sock *sk = sock->sk, *other;
2089 unsigned int mask, writable;
2091 sock_poll_wait(file, sk_sleep(sk), wait);
2094 /* exceptional events? */
2095 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2097 if (sk->sk_shutdown & RCV_SHUTDOWN)
2099 if (sk->sk_shutdown == SHUTDOWN_MASK)
2103 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2104 (sk->sk_shutdown & RCV_SHUTDOWN))
2105 mask |= POLLIN | POLLRDNORM;
2107 /* Connection-based sockets need to check for termination and startup */
2108 if (sk->sk_type == SOCK_SEQPACKET) {
2109 if (sk->sk_state == TCP_CLOSE)
2111 /* connection hasn't started yet? */
2112 if (sk->sk_state == TCP_SYN_SENT)
2117 writable = unix_writable(sk);
2119 other = unix_peer_get(sk);
2121 if (unix_peer(other) != sk) {
2122 sock_poll_wait(file, &unix_sk(other)->peer_wait,
2124 if (unix_recvq_full(other))
2133 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2135 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2140 #ifdef CONFIG_PROC_FS
2141 static struct sock *first_unix_socket(int *i)
2143 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2144 if (!hlist_empty(&unix_socket_table[*i]))
2145 return __sk_head(&unix_socket_table[*i]);
2150 static struct sock *next_unix_socket(int *i, struct sock *s)
2152 struct sock *next = sk_next(s);
2153 /* More in this chain? */
2156 /* Look for next non-empty chain. */
2157 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2158 if (!hlist_empty(&unix_socket_table[*i]))
2159 return __sk_head(&unix_socket_table[*i]);
2164 struct unix_iter_state {
2165 struct seq_net_private p;
2169 static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2171 struct unix_iter_state *iter = seq->private;
2175 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2176 if (sock_net(s) != seq_file_net(seq))
2185 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2186 __acquires(unix_table_lock)
2188 spin_lock(&unix_table_lock);
2189 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2192 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2194 struct unix_iter_state *iter = seq->private;
2195 struct sock *sk = v;
2198 if (v == SEQ_START_TOKEN)
2199 sk = first_unix_socket(&iter->i);
2201 sk = next_unix_socket(&iter->i, sk);
2202 while (sk && (sock_net(sk) != seq_file_net(seq)))
2203 sk = next_unix_socket(&iter->i, sk);
2207 static void unix_seq_stop(struct seq_file *seq, void *v)
2208 __releases(unix_table_lock)
2210 spin_unlock(&unix_table_lock);
2213 static int unix_seq_show(struct seq_file *seq, void *v)
2216 if (v == SEQ_START_TOKEN)
2217 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2221 struct unix_sock *u = unix_sk(s);
2224 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2226 atomic_read(&s->sk_refcnt),
2228 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2231 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2232 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2240 len = u->addr->len - sizeof(short);
2241 if (!UNIX_ABSTRACT(s))
2247 for ( ; i < len; i++)
2248 seq_putc(seq, u->addr->name->sun_path[i]);
2250 unix_state_unlock(s);
2251 seq_putc(seq, '\n');
2257 static const struct seq_operations unix_seq_ops = {
2258 .start = unix_seq_start,
2259 .next = unix_seq_next,
2260 .stop = unix_seq_stop,
2261 .show = unix_seq_show,
2264 static int unix_seq_open(struct inode *inode, struct file *file)
2266 return seq_open_net(inode, file, &unix_seq_ops,
2267 sizeof(struct unix_iter_state));
2270 static const struct file_operations unix_seq_fops = {
2271 .owner = THIS_MODULE,
2272 .open = unix_seq_open,
2274 .llseek = seq_lseek,
2275 .release = seq_release_net,
2280 static const struct net_proto_family unix_family_ops = {
2282 .create = unix_create,
2283 .owner = THIS_MODULE,
2287 static int __net_init unix_net_init(struct net *net)
2289 int error = -ENOMEM;
2291 net->unx.sysctl_max_dgram_qlen = 10;
2292 if (unix_sysctl_register(net))
2295 #ifdef CONFIG_PROC_FS
2296 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2297 unix_sysctl_unregister(net);
2306 static void __net_exit unix_net_exit(struct net *net)
2308 unix_sysctl_unregister(net);
2309 proc_net_remove(net, "unix");
2312 static struct pernet_operations unix_net_ops = {
2313 .init = unix_net_init,
2314 .exit = unix_net_exit,
2317 static int __init af_unix_init(void)
2320 struct sk_buff *dummy_skb;
2322 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2324 rc = proto_register(&unix_proto, 1);
2326 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2331 sock_register(&unix_family_ops);
2332 register_pernet_subsys(&unix_net_ops);
2337 static void __exit af_unix_exit(void)
2339 sock_unregister(PF_UNIX);
2340 proto_unregister(&unix_proto);
2341 unregister_pernet_subsys(&unix_net_ops);
2344 /* Earlier than device_initcall() so that other drivers invoking
2345 request_module() don't end up in a loop when modprobe tries
2346 to use a UNIX socket. But later than subsys_initcall() because
2347 we depend on stuff initialised there */
2348 fs_initcall(af_unix_init);
2349 module_exit(af_unix_exit);
2351 MODULE_LICENSE("GPL");
2352 MODULE_ALIAS_NETPROTO(PF_UNIX);