/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec, *ptr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;
	int err;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (!closing) {
		if (atomic_read(&nlk->mapped))
			return -EBUSY;
		if (atomic_read(&ring->pending))
			return -EBUSY;
	}

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	err = -EBUSY;
	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		err = 0;
		spin_lock_bh(&queue->lock);

		ring->frame_max		= req->nm_frame_nr - 1;
		ring->head		= 0;
		ring->frame_size	= req->nm_frame_size;
		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
	return err;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
	p_end	= pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}
static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb != NULL && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = siocb->scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}
#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */
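
/*
 * Illustrative sketch (not part of this file): how a userspace program would
 * set up and drain an RX ring on a netlink socket when CONFIG_NETLINK_MMAP is
 * enabled. The block/frame sizes are arbitrary example values and error
 * handling is elided; the helper names are hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/netlink.h>

static void *setup_rx_ring(int fd, struct nl_mmap_req *req)
{
	/* 64 frames of 16 KiB, grouped into 4 blocks of 256 KiB */
	req->nm_block_size	= 256 * 1024;
	req->nm_block_nr	= 4;
	req->nm_frame_size	= 16 * 1024;
	req->nm_frame_nr	= 64;
	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, req, sizeof(*req));

	/* map the ring; the size is validated against the ring geometry
	 * by netlink_mmap() above */
	return mmap(NULL, req->nm_block_size * req->nm_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

static void drain_one_frame(struct nl_mmap_hdr *hdr)
{
	if (hdr->nm_status == NL_MMAP_STATUS_VALID) {
		/* hdr->nm_len bytes of payload start at NL_MMAP_HDRLEN */
		/* ... process the message ... */
		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
	}
}
#endif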
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

static void netlink_consume_callback(struct netlink_callback *cb)
{
	consume_skb(cb->skb);
	kfree(cb);
}

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);

		module_put(nlk->cb->module);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;

	read_lock(&nl_table_lock);
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(sk, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_portid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *tmp;

		sk_for_each_safe(sk, tmp, &otable[i])
			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
	}

	nl_portid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_portid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	int len;

	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	len = 0;
	sk_for_each(osk, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
			break;
		len++;
	}
	if (osk)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_portid_hash_dilute(hash, len))
		head = nl_portid_hashfn(hash, portid);
	hash->entries++;
	nlk_sk(sk)->portid = portid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	void (*bind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions) {
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(osk, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->portid == portid) {
			/* Bind collision, search negative portid values. */
			portid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap over the network namespace of
 * the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
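
/*
 * Illustrative sketch (not from this file): a typical per-message handler
 * uses netlink_net_capable() to check the *sender's* privileges before
 * acting on a request. "foo_doit" is a hypothetical handler with the
 * netlink_rcv_skb() callback signature.
 */
#if 0
static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* reject requests from unprivileged senders */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	/* ... perform the privileged operation described by nlh ... */
	return 0;
}
#endif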
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid) {
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	if (nlk->netlink_bind && nlk->groups[0]) {
		int i;

		for (i = 0; i < nlk->ngroups; i++) {
			if (test_bit(i, nlk->groups))
				nlk->netlink_bind(i);
		}
	}

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group  = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group  = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
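
/*
 * Illustrative sketch (not from this file): a kernel-side input handler
 * replying to the requesting portid via netlink_unicast(), which consumes
 * the skb in all cases. "my_sk" is a hypothetical kernel socket.
 */
#if 0
extern struct sock *my_sk;

static void my_input(struct sk_buff *skb)
{
	u32 portid = NETLINK_CB(skb).portid;
	struct sk_buff *reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (reply == NULL)
		return;
	/* trivial reply: just a NLMSG_DONE header */
	nlmsg_put(reply, 0, 0, NLMSG_DONE, 0, 0);
	netlink_unicast(my_sk, reply, portid, MSG_DONTWAIT);
}
#endif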
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};
static int do_one_broadcast(struct sock *sk,
			    struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
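
/*
 * Illustrative sketch (not from this file): broadcasting an event skb to a
 * multicast group from a kernel socket. "my_sk" and MY_GRP are hypothetical;
 * netlink_has_listeners() avoids building a message nobody is subscribed to.
 */
#if 0
extern struct sock *my_sk;
#define MY_GRP	1

static void my_notify(void)
{
	struct sk_buff *skb;

	if (!netlink_has_listeners(my_sk, MY_GRP))
		return;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return;
	/* ... fill in the notification message ... */
	netlink_broadcast(my_sk, skb, 0, MY_GRP, GFP_KERNEL);
}
#endif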
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
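
/*
 * Illustrative sketch (not from this file): if an event message cannot even
 * be allocated, a subsystem can push ENOBUFS to every member of the group
 * instead of silently dropping the event. "my_sk" and MY_GRP are
 * hypothetical; note the negative error code, as documented above.
 */
#if 0
extern struct sock *my_sk;
#define MY_GRP	1

static void my_notify_or_err(struct sk_buff *skb)
{
	if (skb == NULL) {
		netlink_set_err(my_sk, 0, MY_GRP, -ENOBUFS);
		return;
	}
	netlink_broadcast(my_sk, skb, 0, MY_GRP, GFP_KERNEL);
}
#endif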
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		if (nlk->netlink_bind)
			nlk->netlink_bind(val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req, false,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   siocb);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= siocb->scm->creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].flags = cfg->flags;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
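
/*
 * Illustrative sketch (not from this file): creating a kernel-side socket
 * through the netlink_kernel_create() wrapper from <linux/netlink.h>, which
 * passes THIS_MODULE to __netlink_kernel_create() above. The protocol choice
 * and handler name are example assumptions.
 */
#if 0
static void my_input(struct sk_buff *skb);

static struct sock *my_create(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= my_input,
	};

	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}
#endif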
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - remove all listeners from a multicast group
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
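
/*
 * Illustrative sketch (not from this file): most callers build messages with
 * the nlmsg_put()/nla_put_*()/nlmsg_end() helpers from <net/netlink.h>
 * rather than calling __nlmsg_put() directly. MY_MSG_TYPE and MY_ATTR are
 * hypothetical identifiers.
 */
#if 0
#define MY_MSG_TYPE	0x10	/* >= NLMSG_MIN_TYPE */
#define MY_ATTR		1

static int my_fill(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, 0, 0);
	if (nlh == NULL)
		return -EMSGSIZE;
	if (nla_put_u32(skb, MY_ATTR, 42))
		goto nla_put_failure;
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
#endif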
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;
	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	module_put(cb->module);
	netlink_consume_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL) {
			kfree(cb);
			return -ENOBUFS;
		}
	} else
		atomic_inc(&skb->users);

	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EBUSY;
		goto out;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(cb->module)) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
out:
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(__netlink_dump_start);
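
/*
 * Illustrative sketch (not from this file): starting a dump from a request
 * handler through the netlink_dump_start() wrapper in <linux/netlink.h>.
 * my_dump()/my_done() are hypothetical callbacks invoked repeatedly by
 * netlink_dump() above until my_dump() returns 0.
 */
#if 0
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb);
static int my_done(struct netlink_callback *cb);

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,
			.done = my_done,
		};
		/* returns -EINTR on success so no ACK is sent */
		return netlink_dump_start(skb->sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif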
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
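
/*
 * Illustrative sketch (not from this file): the input callback of a kernel
 * socket usually just feeds each skb through netlink_rcv_skb(), which splits
 * it into messages, dispatches them and handles ACKs. "my_doit" is the
 * hypothetical per-message handler from the earlier sketches.
 */
#if 0
static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh);

static void my_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, my_doit);
}
#endif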
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
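
/*
 * Illustrative sketch (not from this file): the typical notifier pattern,
 * echoing a report to the requester when NLM_F_ECHO was set and multicasting
 * to the group otherwise. "my_sk" and MY_GRP are hypothetical.
 */
#if 0
extern struct sock *my_sk;
#define MY_GRP	1

static int my_notify_event(struct sk_buff *skb, u32 portid,
			   struct nlmsghdr *nlh)
{
	int report = nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;

	return nlmsg_notify(my_sk, skb, portid, MY_GRP, report, GFP_KERNEL);
}
#endif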
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
				sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
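
/*
 * Illustrative sketch (not from this file): a subsystem watching for
 * NETLINK_URELEASE notifications to drop per-portid state when a socket
 * goes away. The callback body and protocol choice are hypothetical;
 * register with netlink_register_notifier(&my_nb) at init and unregister
 * with netlink_unregister_notifier(&my_nb) at exit.
 */
#if 0
static int my_netlink_event(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct netlink_notify *n = data;

	if (action == NETLINK_URELEASE && n->protocol == NETLINK_USERSOCK) {
		/* ... drop state keyed by n->portid in n->net ... */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netlink_event,
};
#endif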
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static int __init netlink_proto_init(void)
{
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (totalram_pages >= (128 * 1024))
		limit = totalram_pages >> (21 - PAGE_SHIFT);
	else
		limit = totalram_pages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_portid_hash_free(nl_table[i].hash.table,
						    1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);