/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside
     the device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are not.

   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:

   Incoming, dev->hard_header!=NULL
      mac_header -> ll header

   Outgoing, dev->hard_header!=NULL
      mac_header -> ll header

   Incoming, dev->hard_header==NULL
      mac_header -> UNKNOWN position. It very likely points to the ll
		    header. PPP does this, which is wrong, because it
		    introduces asymmetry between the rx and tx paths.

   Outgoing, dev->hard_header==NULL
      mac_header -> data. The ll header is still not built!

   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.

On transmit:

   dev->hard_header != NULL
      mac_header -> ll header

   dev->hard_header == NULL (the ll header is added by the device,
   we cannot control it)

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
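/*
 * Illustrative userspace sketch (not part of this file): the rx rules above
 * are what make SOCK_RAW deliver the link-layer header while SOCK_DGRAM
 * strips it. A minimal capture loop, assuming an Ethernet device and
 * CAP_NET_RAW:
 *
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// SOCK_RAW: frames arrive with the ll (Ethernet) header;
 *		// SOCK_DGRAM would deliver the payload only.
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		unsigned char buf[2048];
 *
 *		if (fd < 0)
 *			return 1;
 *		for (;;) {
 *			ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *			if (n < 0)
 *				break;
 *			// With SOCK_RAW, buf[0..5]/buf[6..11] hold the dst/src
 *			// MAC addresses and buf[12..13] the EtherType.
 *			printf("frame: %zd bytes\n", n);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */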
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
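/*
 * Illustrative userspace sketch (not part of this file): joining a multicast
 * group via the standard struct packet_mreq from <linux/if_packet.h>; the
 * kernel copies it into the larger packet_mreq_max above.
 *
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int join_mc(int fd, int ifindex, const unsigned char mac[6])
 *	{
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = ifindex;
 *		mreq.mr_type = PACKET_MR_MULTICAST;
 *		mreq.mr_alen = 6;
 *		memcpy(mreq.mr_address, mac, 6);
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */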
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring);

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
					  struct packet_ring_buffer *rb,
					  int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
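/*
 * Illustrative userspace sketch (not part of this file): after mmap()ing the
 * ring, userspace sees the same geometry. Given the tpacket_req it passed in,
 * frame i lives at the address computed with the same block/offset math as
 * packet_lookup_frame() above:
 *
 *	// base is the address returned by mmap(); req is the tpacket_req
 *	// used with PACKET_RX_RING. All names here are the caller's own.
 *	static void *frame_ptr(void *base, struct tpacket_req *req,
 *			       unsigned int i)
 *	{
 *		unsigned int fpb = req->tp_block_size / req->tp_frame_size;
 *		unsigned int block = i / fpb;
 *		unsigned int off = i % fpb;
 *
 *		return (char *)base + block * req->tp_block_size
 *				    + off * req->tp_frame_size;
 *	}
 */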
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */
	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this right or
	 * kerboom...]. Incoming packets have the ll header pulled; push it
	 * back. For outgoing ones skb->data == skb_mac_header(skb), so this
	 * procedure is a no-op.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/* The SOCK_PACKET socket receives _all_ frames. */
	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets from using up all the memory.
	 */
	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 * Get and verify the address.
	 */
	if (msg->msg_namelen < sizeof(struct sockaddr))
		return -EINVAL;
	if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
		proto = saddr->spkt_protocol;
	else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 * Find the device first to size check it
	 */
	saddr->spkt_device[13] = 0;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), 0);
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the MTU. This is the lowest
	 * level raw protocol and you must do your own fragmentation at this
	 * level.
	 */
	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	dev_queue_xmit(skb);
	return len;

out_free:
	kfree_skb(skb);
	return err;

out_unlock:
	rcu_read_unlock();
	return err;
}
static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
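/*
 * Illustrative userspace sketch (not part of this file): run_filter() above
 * executes whatever classic BPF program the owner attached with
 * SO_ATTACH_FILTER. A minimal filter that accepts only IPv4 (EtherType
 * 0x0800) frames, assuming a SOCK_RAW packet socket fd:
 *
 *	#include <linux/filter.h>
 *	#include <sys/socket.h>
 *
 *	static int attach_ipv4_filter(int fd)
 *	{
 *		static struct sock_filter code[] = {
 *			// load the EtherType halfword at offset 12
 *			{ 0x28, 0, 0, 0x0000000c },	// BPF_LD | BPF_H | BPF_ABS
 *			// accept if it equals ETH_P_IP, else drop
 *			{ 0x15, 0, 1, 0x00000800 },	// BPF_JMP | BPF_JEQ
 *			{ 0x06, 0, 0, 0x0000ffff },	// BPF_RET (accept)
 *			{ 0x06, 0, 0, 0x00000000 },	// BPF_RET (drop)
 *		};
 *		struct sock_fprog prog = {
 *			.len = sizeof(code) / sizeof(code[0]),
 *			.filter = code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */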
/*
 * This function performs lazy skb cloning in the hope that most of the
 * packets are discarded by BPF.
 *
 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			 po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);
	smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
#endif

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
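/*
 * Illustrative userspace sketch (not part of this file): the consumer side
 * of the TPACKET_V2 rx ring that tpacket_rcv() fills. Sizes are arbitrary
 * example values; error handling is elided for brevity.
 *
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <poll.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// must be a multiple of PAGE_SIZE
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,	// must be TPACKET_ALIGNMENT aligned
 *		.tp_frame_nr   = 4096 / 2048 * 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	// Walk the frames; the kernel flips tp_status to TP_STATUS_USER,
 *	// the consumer hands the frame back with TP_STATUS_KERNEL.
 *	for (unsigned int i = 0; ; i = (i + 1) % req.tp_frame_nr) {
 *		struct tpacket2_hdr *hdr = frame_ptr(ring, &req, i);
 *
 *		while (!(hdr->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *			poll(&pfd, 1, -1);
 *		}
 *		// packet data starts tp_mac bytes into the frame
 *		process((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 *
 * frame_ptr() is the helper sketched after packet_increment_head() above;
 * process() stands in for the application's own handler.
 */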
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
			    void *frame, struct net_device *dev, int size_max,
			    __be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				      NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	int ifindex, err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	mutex_lock(&po->pg_vec_lock);

	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);
	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
					  addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending)))));

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
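/*
 * Illustrative userspace sketch (not part of this file): the producer side
 * of the tx ring drained by tpacket_snd(). Assumes a TPACKET_V2 ring was
 * set up with PACKET_TX_RING and mmap()ed exactly like the rx example
 * above, and that the socket was bound to an interface with bind().
 *
 *	struct tpacket2_hdr *hdr = frame_ptr(ring, &req, i);
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		memcpy(data, frame, frame_len);	// caller-built frame
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);		// kick tpacket_snd()
 *	}
 */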
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 * Get and verify the address.
	 */
	if (saddr == NULL) {
		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;
		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 * Now send it
	 */
	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
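/*
 * Illustrative userspace sketch (not part of this file): the non-ring send
 * path above is what a plain sendto() exercises. Sending a caller-built
 * Ethernet frame out of a given interface:
 *
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t send_frame(int fd, int ifindex,
 *				  const void *frame, size_t len)
 *	{
 *		struct sockaddr_ll sll;
 *
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family	 = AF_PACKET;
 *		sll.sll_ifindex	 = ifindex;
 *		sll.sll_protocol = htons(ETH_P_ALL);
 *		sll.sll_halen	 = ETH_ALEN;
 *		// frame must already carry the full ll header (SOCK_RAW)
 *		return sendto(fd, frame, len, 0,
 *			      (struct sockaddr *)&sll, sizeof(sll));
 *	}
 */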
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	/*
	 *	Remove from protocol table
	 */
	if (po->running) {
		po->running = 0;
		__dev_remove_pack(&po->prot_hook);
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	if (po->rx_ring.pg_vec) {
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1, 1);
	}

	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */
	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	/*
	 *	Detach an existing hook if present.
	 */
	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */
	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */
	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;
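	/*
	 * Illustrative userspace note (not part of this file): these are the
	 * three flavours a caller can ask for, all requiring CAP_NET_RAW:
	 *
	 *	socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL)); // with ll header
	 *	socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));  // ll header removed
	 *	socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL)); // obsolete
	 */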
	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */
	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */
	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in,
	 *	we fill it in now.
	 */
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries
	 *	a user program they can ask the device for its MTU anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}
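	/*
	 * Illustrative userspace sketch (not part of this file): reading the
	 * PACKET_AUXDATA control message emitted above, after enabling it
	 * with setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one)):
	 *
	 *	struct msghdr msg = { ... };	// iov + control buffer set up by caller
	 *	ssize_t n = recvmsg(fd, &msg, 0);
	 *	struct cmsghdr *cmsg;
	 *
	 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
	 *		if (cmsg->cmsg_level == SOL_PACKET &&
	 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
	 *			struct tpacket_auxdata *aux =
	 *				(void *)CMSG_DATA(cmsg);
	 *			// aux->tp_len is the original length even if
	 *			// the data was truncated to aux->tp_snaplen
	 *		}
	 *	}
	 */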
	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk	= sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_tstamp;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
					sock_hold(sk);
					po->running = 1;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	} else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec)
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};
2694 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2697 struct net *net = seq_file_net(seq);
2700 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
2703 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2705 struct net *net = seq_file_net(seq);
2706 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
2709 static void packet_seq_stop(struct seq_file *seq, void *v)
2715 static int packet_seq_show(struct seq_file *seq, void *v)
2717 if (v == SEQ_START_TOKEN)
2718 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2720 struct sock *s = sk_entry(v);
2721 const struct packet_sock *po = pkt_sk(s);
2724 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2726 atomic_read(&s->sk_refcnt),
2731 atomic_read(&s->sk_rmem_alloc),
2739 static const struct seq_operations packet_seq_ops = {
2740 .start = packet_seq_start,
2741 .next = packet_seq_next,
2742 .stop = packet_seq_stop,
2743 .show = packet_seq_show,
2746 static int packet_seq_open(struct inode *inode, struct file *file)
2748 return seq_open_net(inode, file, &packet_seq_ops,
2749 sizeof(struct seq_net_private));
2752 static const struct file_operations packet_seq_fops = {
2753 .owner = THIS_MODULE,
2754 .open = packet_seq_open,
2756 .llseek = seq_lseek,
2757 .release = seq_release_net,
static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);