2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
/* Build the initial (built-in) ruleset blob for an ip6tables table by
 * delegating to the generic xt_repldata helper; exported for the
 * ip6table_* table modules.
 * NOTE(review): excerpt — enclosing braces/lines elided in this view. */
67 void *ip6t_alloc_initial_table(const struct xt_table *info)
69 return xt_alloc_initial_table(ip6t, IP6T);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
/* Decide whether @skb matches the generic IPv6 part of a rule
 * (@ip6info): source/destination address+mask, in/out interface names,
 * and protocol header. FWINV() XORs each test with the rule's
 * corresponding IP6T_INV_* inversion flag. On a protocol match,
 * *protoff/*fragoff are updated for the extension-match code.
 * NOTE(review): excerpt — several statements (returns, braces) elided. */
85 ip6_packet_match(const struct sk_buff *skb,
88 const struct ip6t_ip6 *ip6info,
89 unsigned int *protoff,
90 int *fragoff, bool *hotdrop)
93 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Address checks: masked compare of saddr/daddr, each invertible. */
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
98 &ip6info->src), IP6T_INV_SRCIP) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
100 &ip6info->dst), IP6T_INV_DSTIP)) {
101 dprintf("Source or dest mismatch.\n");
/* The dprintf calls below are the tail of a commented-out IPv4-style
 * debug block (its opening comment marker is on an elided line). */
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface name checks (aligned word-wise compare with mask). */
112 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
114 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev, ip6info->iniface,
117 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
121 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
123 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev, ip6info->outiface,
126 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info->flags & IP6T_F_PROTO)) {
135 unsigned short _frag_off;
/* Walk the IPv6 extension-header chain to find the upper-layer
 * protocol; also reports the fragment offset. */
137 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
143 *fragoff = _frag_off;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
150 if (ip6info->proto == protohdr) {
151 if(ip6info->invflags & IP6T_INV_PROTO) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info->proto != 0) &&
159 !(ip6info->invflags & IP6T_INV_PROTO))
165 /* should be ip6 safe */
/* Validate the generic IPv6 part of a user-supplied rule: reject any
 * flags or inversion flags outside the known IP6T_F_MASK /
 * IP6T_INV_MASK bit sets. Returns true/false on elided lines. */
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: rate-limited log of the
 * chain name carried in targinfo; verdict (NF_DROP) is on an elided line. */
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Translate a byte @offset within a table blob @base into a pointer to
 * the ip6t_entry stored there. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((char *)base + offset);
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
200 static const struct ip6t_ip6 uncond;
202 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* Const-correct accessor for a rule's target: casts away const only to
 * reuse the non-const ip6t_get_target() helper; the result is returned
 * re-qualified as const. */
205 static inline const struct xt_entry_target *
206 ip6t_get_target_c(const struct ip6t_entry *e)
208 return ip6t_get_target((struct ip6t_entry *)e);
/* Static lookup tables used only when the TRACE target is enabled:
 * hook-number -> chain name, trace-comment enum -> string, and the
 * nf_log_packet() loginfo template. */
211 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
212 /* This cries for unification! */
213 static const char *const hooknames[] = {
214 [NF_INET_PRE_ROUTING] = "PREROUTING",
215 [NF_INET_LOCAL_IN] = "INPUT",
216 [NF_INET_FORWARD] = "FORWARD",
217 [NF_INET_LOCAL_OUT] = "OUTPUT",
218 [NF_INET_POST_ROUTING] = "POSTROUTING",
221 enum nf_ip_trace_comments {
222 NF_IP6_TRACE_COMMENT_RULE,
223 NF_IP6_TRACE_COMMENT_RETURN,
224 NF_IP6_TRACE_COMMENT_POLICY,
227 static const char *const comments[] = {
228 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
229 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
230 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
233 static struct nf_loginfo trace_loginfo = {
234 .type = NF_LOG_TYPE_LOG,
238 .logflags = NF_LOG_MASK,
243 /* Mildly perf critical (only if packet tracing is on) */
/* Walk rules from chain head @s toward matched entry @e, tracking the
 * current user-chain name (ERROR target carries it), the rule number
 * within that chain, and whether the hit is a plain rule, a chain
 * policy, or a RETURN. Return value/loop framing is on elided lines. */
245 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
246 const char *hookname, const char **chainname,
247 const char **comment, unsigned int *rulenum)
249 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
251 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
252 /* Head of user chain: ERROR target with chainname */
253 *chainname = t->target.data;
/* Unconditional STANDARD target at the end of a chain is either the
 * base-chain policy (chainname == hookname) or an implicit RETURN. */
258 if (s->target_offset == sizeof(struct ip6t_entry) &&
259 strcmp(t->target.u.kernel.target->name,
260 XT_STANDARD_TARGET) == 0 &&
262 unconditional(&s->ipv6)) {
263 /* Tail of chains: STANDARD target (return/policy) */
264 *comment = *chainname == hookname
265 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
266 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet that hit entry @e. Scans from the hook's chain head on this
 * CPU's table copy to recover the chain name and rule number. */
275 static void trace_packet(const struct sk_buff *skb,
277 const struct net_device *in,
278 const struct net_device *out,
279 const char *tablename,
280 const struct xt_table_info *private,
281 const struct ip6t_entry *e)
283 const void *table_base;
284 const struct ip6t_entry *root;
285 const char *hookname, *chainname, *comment;
286 const struct ip6t_entry *iter;
287 unsigned int rulenum = 0;
/* One of in/out is always set for a traced hook; derive the netns. */
288 struct net *net = dev_net(in ? in : out);
290 table_base = private->entries[smp_processor_id()];
291 root = get_entry(table_base, private->hook_entry[hook]);
293 hookname = chainname = hooknames[hook];
294 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
296 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
297 if (get_chainname_rulenum(iter, e, hookname,
298 &chainname, &comment, &rulenum) != 0)
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum);
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
310 return (void *)entry + entry->next_offset;
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main per-packet evaluation loop. Walks this CPU's copy of the
 * ruleset starting at the current hook's entry point, evaluating each
 * rule's IPv6 match, extension matches, and target; maintains an
 * explicit jump stack for user-defined chains. Counters are updated
 * under xt_write_recseq so readers can take a consistent snapshot.
 * NOTE(review): excerpt — loop framing, labels and the final return
 * are on elided lines. */
315 ip6t_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int *stackptr, origptr, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
333 indev = in ? in->name : nulldevname;
334 outdev = out ? out->name : nulldevname;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar.hotdrop = false;
344 acpar.family = NFPROTO_IPV6;
345 acpar.hooknum = hook;
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Enter the write-recseq section: counter updates below are seen
 * atomically by get_counters() snapshots. */
350 addend = xt_write_recseq_begin();
351 private = table->private;
353 * Ensure we load private-> members after we've fetched the base
356 smp_read_barrier_depends();
357 cpu = smp_processor_id();
358 table_base = private->entries[cpu];
359 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
360 stackptr = per_cpu_ptr(private->stackptr, cpu);
363 e = get_entry(table_base, private->hook_entry[hook]);
366 const struct xt_entry_target *t;
367 const struct xt_entry_match *ematch;
/* Generic IPv6 header match failed: fall through to next rule. */
371 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
372 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
374 e = ip6t_next_entry(e);
/* Run every extension match attached to the rule. */
378 xt_ematch_foreach(ematch, e) {
379 acpar.match = ematch->u.kernel.match;
380 acpar.matchinfo = ematch->data;
381 if (!acpar.match->match(skb, &acpar))
385 ADD_COUNTER(e->counters, skb->len, 1);
387 t = ip6t_get_target_c(e);
388 IP_NF_ASSERT(t->u.kernel.target);
390 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
391 /* The packet is traced: log it */
392 if (unlikely(skb->nf_trace))
393 trace_packet(skb, hook, in, out,
394 table->name, private, e);
396 /* Standard target? */
397 if (!t->u.kernel.target->target) {
400 v = ((struct xt_standard_target *)t)->verdict;
402 /* Pop from stack? */
403 if (v != XT_RETURN) {
/* Negative verdicts encode NF_* values as -(verdict)-1. */
404 verdict = (unsigned int)(-v) - 1;
407 if (*stackptr <= origptr)
408 e = get_entry(table_base,
409 private->underflow[hook]);
411 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* A real jump (not fallthrough, not GOTO): push return address. */
414 if (table_base + v != ip6t_next_entry(e) &&
415 !(e->ipv6.flags & IP6T_F_GOTO)) {
416 if (*stackptr >= private->stacksize) {
420 jumpstack[(*stackptr)++] = e;
423 e = get_entry(table_base, v);
/* Non-standard target: invoke its handler. */
427 acpar.target = t->u.kernel.target;
428 acpar.targinfo = t->data;
430 verdict = t->u.kernel.target->target(skb, &acpar);
431 if (verdict == XT_CONTINUE)
432 e = ip6t_next_entry(e);
436 } while (!acpar.hotdrop);
440 xt_write_recseq_end(addend);
443 #ifdef DEBUG_ALLOW_ALL
452 /* Figures out from what hook each rule can be called: returns 0 if
453 there are loops. Puts hook bitmask in comefrom. */
/* Loop detection / reachability pass over a candidate ruleset.
 * Walks from every valid hook entry, using counters.pcnt as a saved
 * back-pointer (restored to 0 on the way out) and comefrom as a
 * visited bitmask; bit NF_INET_NUMHOOKS marks "currently on path",
 * so seeing it again means a cycle.
 * NOTE(review): upstream later added bounds checks here against
 * underflow/jump offsets (CVE-2016-3134 era fixes) — verify whether
 * this tree includes them. */
455 mark_source_chains(const struct xt_table_info *newinfo,
456 unsigned int valid_hooks, void *entry0)
460 /* No recursion; use packet counter to save back ptrs (reset
461 to 0 as we leave), and comefrom to save source hook bitmask */
462 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
463 unsigned int pos = newinfo->hook_entry[hook];
464 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
466 if (!(valid_hooks & (1 << hook)))
469 /* Set initial back pointer. */
470 e->counters.pcnt = pos;
473 const struct xt_standard_target *t
474 = (void *)ip6t_get_target_c(e);
475 int visited = e->comefrom & (1 << hook);
/* Revisiting a rule still on the current path => loop. */
477 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
478 pr_err("iptables: loop hook %u pos %u %08X.\n",
479 hook, pos, e->comefrom);
482 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
484 /* Unconditional return/END. */
485 if ((e->target_offset == sizeof(struct ip6t_entry) &&
486 (strcmp(t->target.u.user.name,
487 XT_STANDARD_TARGET) == 0) &&
489 unconditional(&e->ipv6)) || visited) {
490 unsigned int oldpos, size;
/* Reject verdicts below the encodable NF_* range. */
492 if ((strcmp(t->target.u.user.name,
493 XT_STANDARD_TARGET) == 0) &&
494 t->verdict < -NF_MAX_VERDICT - 1) {
495 duprintf("mark_source_chains: bad "
496 "negative verdict (%i)\n",
501 /* Return: backtrack through the last
504 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
505 #ifdef DEBUG_IP_FIREWALL_USER
507 & (1 << NF_INET_NUMHOOKS)) {
508 duprintf("Back unset "
/* Pop the saved back-pointer and clear it. */
515 pos = e->counters.pcnt;
516 e->counters.pcnt = 0;
518 /* We're at the start. */
522 e = (struct ip6t_entry *)
524 } while (oldpos == pos + e->next_offset);
527 size = e->next_offset;
528 e = (struct ip6t_entry *)
529 (entry0 + pos + size);
530 e->counters.pcnt = pos;
/* Positive verdict on a STANDARD target == absolute jump offset. */
533 int newpos = t->verdict;
535 if (strcmp(t->target.u.user.name,
536 XT_STANDARD_TARGET) == 0 &&
538 if (newpos > newinfo->size -
539 sizeof(struct ip6t_entry)) {
540 duprintf("mark_source_chains: "
541 "bad verdict (%i)\n",
545 /* This a jump; chase it. */
546 duprintf("Jump rule %u -> %u\n",
549 /* ... this is a fallthru */
550 newpos = pos + e->next_offset;
552 e = (struct ip6t_entry *)
554 e->counters.pcnt = pos;
559 duprintf("Finished chain %u\n", hook);
/* Destroy one extension match instance: run its destructor (if any)
 * and drop the module reference taken when it was looked up. */
564 static void cleanup_match(struct xt_entry_match *m, struct net *net)
566 struct xt_mtdtor_param par;
569 par.match = m->u.kernel.match;
570 par.matchinfo = m->data;
571 par.family = NFPROTO_IPV6;
572 if (par.match->destroy != NULL)
573 par.match->destroy(&par);
574 module_put(par.match->me);
/* Structural sanity check of one rule: the IPv6 match part must be
 * valid, and the target must start after the matches and end within
 * next_offset (so the target record cannot overrun the entry). */
578 check_entry(const struct ip6t_entry *e, const char *name)
580 const struct xt_entry_target *t;
582 if (!ip6_checkentry(&e->ipv6)) {
583 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
587 if (e->target_offset + sizeof(struct xt_entry_target) >
591 t = ip6t_get_target_c(e);
592 if (e->target_offset + t->u.target_size > e->next_offset)
/* Run the generic xt_check_match() validation for one match, passing
 * the rule's protocol and its inversion flag so proto-restricted
 * matches can verify applicability. */
598 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
600 const struct ip6t_ip6 *ipv6 = par->entryinfo;
603 par->match = m->u.kernel.match;
604 par->matchinfo = m->data;
606 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
607 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
609 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up (and possibly modprobe) the match module named in the rule,
 * take a reference to it, then validate it via check_match(); on
 * validation failure the module reference is dropped again. */
617 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
619 struct xt_match *match;
622 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
625 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
626 return PTR_ERR(match);
628 m->u.kernel.match = match;
630 ret = check_match(m, par);
/* Error path: release the reference taken above. */
636 module_put(m->u.kernel.match->me);
/* Validate the rule's (already-resolved) target via xt_check_target(),
 * supplying the hook mask recorded in comefrom and the rule's
 * protocol/inversion context. */
640 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
642 struct xt_entry_target *t = ip6t_get_target(e);
643 struct xt_tgchk_param par = {
647 .target = t->u.kernel.target,
649 .hook_mask = e->comefrom,
650 .family = NFPROTO_IPV6,
654 t = ip6t_get_target(e);
655 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
656 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
658 duprintf("ip_tables: check failed for `%s'.\n",
659 t->u.kernel.target->name);
/* Fully resolve and validate one rule: structural check, then look up
 * and check every extension match, then the target. On any failure,
 * unwind: drop the target module ref and clean up all matches that
 * were successfully set up before the failure point. */
666 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
669 struct xt_entry_target *t;
670 struct xt_target *target;
673 struct xt_mtchk_param mtpar;
674 struct xt_entry_match *ematch;
676 ret = check_entry(e, name);
683 mtpar.entryinfo = &e->ipv6;
684 mtpar.hook_mask = e->comefrom;
685 mtpar.family = NFPROTO_IPV6;
686 xt_ematch_foreach(ematch, e) {
687 ret = find_check_match(ematch, &mtpar);
689 goto cleanup_matches;
693 t = ip6t_get_target(e);
694 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
696 if (IS_ERR(target)) {
697 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
698 ret = PTR_ERR(target);
699 goto cleanup_matches;
701 t->u.kernel.target = target;
703 ret = check_target(e, net, name);
/* Error unwind: release target ref, then matches set up so far. */
708 module_put(t->u.kernel.target->me);
710 xt_ematch_foreach(ematch, e) {
713 cleanup_match(ematch, net);
/* An underflow (base-chain policy) entry must be an unconditional
 * STANDARD-target rule whose decoded verdict is DROP or ACCEPT —
 * anything else could let packets escape the base chain. */
718 static bool check_underflow(const struct ip6t_entry *e)
720 const struct xt_entry_target *t;
721 unsigned int verdict;
723 if (!unconditional(&e->ipv6))
725 t = ip6t_get_target_c(e);
726 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
/* Verdicts are stored as -(NF_*)-1; decode before comparing. */
728 verdict = ((struct xt_standard_target *)t)->verdict;
729 verdict = -verdict - 1;
730 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* First-pass per-entry validation while walking a user-supplied blob:
 * alignment and bounds of the entry, minimum size, and recording of
 * hook entry points / underflows whose offsets coincide with this
 * entry. Also resets counters and comefrom for the later passes. */
734 check_entry_size_and_hooks(struct ip6t_entry *e,
735 struct xt_table_info *newinfo,
736 const unsigned char *base,
737 const unsigned char *limit,
738 const unsigned int *hook_entries,
739 const unsigned int *underflows,
740 unsigned int valid_hooks)
744 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
745 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
746 duprintf("Bad offset %p\n", e);
/* Entry must at least hold the fixed header plus a target record. */
751 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
752 duprintf("checking: element %p size %u\n",
757 /* Check hooks & underflows */
758 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
759 if (!(valid_hooks & (1 << h)))
761 if ((unsigned char *)e - base == hook_entries[h])
762 newinfo->hook_entry[h] = hook_entries[h];
763 if ((unsigned char *)e - base == underflows[h]) {
764 if (!check_underflow(e)) {
765 pr_err("Underflows must be unconditional and "
766 "use the STANDARD target with "
770 newinfo->underflow[h] = underflows[h];
774 /* Clear counters and comefrom */
775 e->counters = ((struct xt_counters) { 0, 0 });
/* Tear down one fully-initialized rule: destroy all matches, then run
 * the target's destructor (if any) and drop its module reference. */
780 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
782 struct xt_tgdtor_param par;
783 struct xt_entry_target *t;
784 struct xt_entry_match *ematch;
786 /* Cleanup all matches */
787 xt_ematch_foreach(ematch, e)
788 cleanup_match(ematch, net);
789 t = ip6t_get_target(e);
792 par.target = t->u.kernel.target;
793 par.targinfo = t->data;
794 par.family = NFPROTO_IPV6;
795 if (par.target->destroy != NULL)
796 par.target->destroy(&par);
797 module_put(par.target->me);
800 /* Checks and translates the user-supplied table segment (held in
/* Multi-pass validation/translation of a replacement ruleset:
 * (1) walk entries checking sizes and recording hook/underflow
 *     offsets, counting ERROR targets toward stacksize;
 * (2) verify entry count and that every valid hook got both an entry
 *     point and an underflow;
 * (3) mark_source_chains() for loop detection;
 * (4) find_check_entry() on every rule, unwinding already-checked
 *     rules on failure;
 * (5) replicate the validated blob to every other CPU's copy. */
803 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
804 const struct ip6t_replace *repl)
806 struct ip6t_entry *iter;
810 newinfo->size = repl->size;
811 newinfo->number = repl->num_entries;
813 /* Init all hooks to impossible value. */
814 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
815 newinfo->hook_entry[i] = 0xFFFFFFFF;
816 newinfo->underflow[i] = 0xFFFFFFFF;
819 duprintf("translate_table: size %u\n", newinfo->size);
821 /* Walk through entries, checking offsets. */
822 xt_entry_foreach(iter, entry0, newinfo->size) {
823 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each ERROR target heads a user chain => one jumpstack slot. */
831 if (strcmp(ip6t_get_target(iter)->u.user.name,
832 XT_ERROR_TARGET) == 0)
833 ++newinfo->stacksize;
836 if (i != repl->num_entries) {
837 duprintf("translate_table: %u not %u entries\n",
838 i, repl->num_entries);
842 /* Check hooks all assigned */
843 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
844 /* Only hooks which are valid */
845 if (!(repl->valid_hooks & (1 << i)))
847 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
848 duprintf("Invalid hook entry %u %u\n",
849 i, repl->hook_entry[i]);
852 if (newinfo->underflow[i] == 0xFFFFFFFF) {
853 duprintf("Invalid underflow %u %u\n",
854 i, repl->underflow[i]);
859 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
862 /* Finally, each sanity check must pass */
864 xt_entry_foreach(iter, entry0, newinfo->size) {
865 ret = find_check_entry(iter, net, repl->name, repl->size);
/* Failure unwind: clean up only the rules already checked. */
872 xt_entry_foreach(iter, entry0, newinfo->size) {
875 cleanup_entry(iter, net);
880 /* And one copy for every other CPU */
881 for_each_possible_cpu(i) {
882 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
883 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Sum per-CPU packet/byte counters for every rule into @counters.
 * Each per-rule read is retried under the CPU's xt_recseq seqcount so
 * a concurrent ip6t_do_table() update can't yield a torn pair. */
890 get_counters(const struct xt_table_info *t,
891 struct xt_counters counters[])
893 struct ip6t_entry *iter;
897 for_each_possible_cpu(cpu) {
898 seqcount_t *s = &per_cpu(xt_recseq, cpu);
901 xt_entry_foreach(iter, t->entries[cpu], t->size) {
/* Seqcount retry loop: re-read if a writer interleaved. */
906 start = read_seqcount_begin(s);
907 bcnt = iter->counters.bcnt;
908 pcnt = iter->counters.pcnt;
909 } while (read_seqcount_retry(s, start));
911 ADD_COUNTER(counters[i], bcnt, pcnt);
/* Allocate a zeroed counter array sized for the table and fill it with
 * an atomic snapshot via get_counters(). Returns ERR_PTR(-ENOMEM) on
 * allocation failure; caller owns (and must vfree) the result. */
917 static struct xt_counters *alloc_counters(const struct xt_table *table)
919 unsigned int countersize;
920 struct xt_counters *counters;
921 const struct xt_table_info *private = table->private;
923 /* We need atomic snapshot of counters: rest doesn't change
924 (other than comefrom, which userspace doesn't care
926 countersize = sizeof(struct xt_counters) * private->number;
927 counters = vzalloc(countersize);
929 if (counters == NULL)
930 return ERR_PTR(-ENOMEM);
932 get_counters(private, counters);
/* Copy the ruleset blob to userspace, then patch it up in place:
 * overwrite each entry's counters with the snapshot values and each
 * match/target's kernel pointers with the user-visible names. */
938 copy_entries_to_user(unsigned int total_size,
939 const struct xt_table *table,
940 void __user *userptr)
942 unsigned int off, num;
943 const struct ip6t_entry *e;
944 struct xt_counters *counters;
945 const struct xt_table_info *private = table->private;
947 const void *loc_cpu_entry;
949 counters = alloc_counters(table);
950 if (IS_ERR(counters))
951 return PTR_ERR(counters);
953 /* choose the copy that is on our node/cpu, ...
954 * This choice is lazy (because current thread is
955 * allowed to migrate to another cpu)
957 loc_cpu_entry = private->entries[raw_smp_processor_id()];
958 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
963 /* FIXME: use iterator macros --RR */
964 /* ... then go back and fix counters and names */
965 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
967 const struct xt_entry_match *m;
968 const struct xt_entry_target *t;
970 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Replace in-kernel counters with the consistent snapshot. */
971 if (copy_to_user(userptr + off
972 + offsetof(struct ip6t_entry, counters),
974 sizeof(counters[num])) != 0) {
/* Rewrite each match's name field for userspace. */
979 for (i = sizeof(struct ip6t_entry);
980 i < e->target_offset;
981 i += m->u.match_size) {
984 if (copy_to_user(userptr + off + i
985 + offsetof(struct xt_entry_match,
987 m->u.kernel.match->name,
988 strlen(m->u.kernel.match->name)+1)
/* Likewise for the target's name. */
995 t = ip6t_get_target_c(e);
996 if (copy_to_user(userptr + off + e->target_offset
997 + offsetof(struct xt_entry_target,
999 t->u.kernel.target->name,
1000 strlen(t->u.kernel.target->name)+1) != 0) {
1011 #ifdef CONFIG_COMPAT
/* Convert a 32-bit-userspace standard verdict to native: positive
 * verdicts are jump offsets, which must be adjusted for the size delta
 * between compat and native entry layouts. */
1012 static void compat_standard_from_user(void *dst, const void *src)
1014 int v = *(compat_int_t *)src;
1017 v += xt_compat_calc_jump(AF_INET6, v);
1018 memcpy(dst, &v, sizeof(v));
/* Inverse of compat_standard_from_user(): shrink a native jump offset
 * back to the compat layout before copying it out to userspace. */
1021 static int compat_standard_to_user(void __user *dst, const void *src)
1023 compat_int_t cv = *(int *)src;
1026 cv -= xt_compat_calc_jump(AF_INET6, cv);
1027 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* For one native entry, compute how much smaller its compat form is
 * (entry header delta plus per-match/target deltas), record the offset
 * mapping for jump fixups, shrink newinfo->size accordingly, and
 * adjust hook entry/underflow offsets that lie past this entry. */
1030 static int compat_calc_entry(const struct ip6t_entry *e,
1031 const struct xt_table_info *info,
1032 const void *base, struct xt_table_info *newinfo)
1034 const struct xt_entry_match *ematch;
1035 const struct xt_entry_target *t;
1036 unsigned int entry_offset;
1039 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1040 entry_offset = (void *)e - base;
1041 xt_ematch_foreach(ematch, e)
1042 off += xt_compat_match_offset(ematch->u.kernel.match);
1043 t = ip6t_get_target_c(e);
1044 off += xt_compat_target_offset(t->u.kernel.target);
1045 newinfo->size -= off;
1046 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Hooks/underflows located after this entry shift down by 'off'. */
1050 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1051 if (info->hook_entry[i] &&
1052 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1053 newinfo->hook_entry[i] -= off;
1054 if (info->underflow[i] &&
1055 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1056 newinfo->underflow[i] -= off;
/* Produce the compat-layout view of a table's metadata (sizes and hook
 * offsets) by running compat_calc_entry() over every rule in this
 * CPU's copy of the blob. */
1061 static int compat_table_info(const struct xt_table_info *info,
1062 struct xt_table_info *newinfo)
1064 struct ip6t_entry *iter;
1065 void *loc_cpu_entry;
1068 if (!newinfo || !info)
1071 /* we dont care about newinfo->entries[] */
1072 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1073 newinfo->initial_entries = 0;
1074 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1075 xt_compat_init_offsets(AF_INET6, info->number);
1076 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1077 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/* IP6T_SO_GET_INFO handler: look up (auto-loading the table module if
 * needed) the named table and copy its hook offsets, entry count and
 * size to userspace; under @compat, sizes are first recomputed for the
 * 32-bit layout. The table name from userspace is NUL-terminated
 * defensively before use. */
1085 static int get_info(struct net *net, void __user *user,
1086 const int *len, int compat)
1088 char name[XT_TABLE_MAXNAMELEN];
1092 if (*len != sizeof(struct ip6t_getinfo)) {
1093 duprintf("length %u != %zu\n", *len,
1094 sizeof(struct ip6t_getinfo));
1098 if (copy_from_user(name, user, sizeof(name)) != 0)
1101 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1102 #ifdef CONFIG_COMPAT
1104 xt_compat_lock(AF_INET6);
1106 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1107 "ip6table_%s", name);
1108 if (!IS_ERR_OR_NULL(t)) {
1109 struct ip6t_getinfo info;
1110 const struct xt_table_info *private = t->private;
1111 #ifdef CONFIG_COMPAT
1112 struct xt_table_info tmp;
/* Compat callers need sizes translated to the 32-bit layout. */
1115 ret = compat_table_info(private, &tmp);
1116 xt_compat_flush_offsets(AF_INET6);
1120 memset(&info, 0, sizeof(info));
1121 info.valid_hooks = t->valid_hooks;
1122 memcpy(info.hook_entry, private->hook_entry,
1123 sizeof(info.hook_entry));
1124 memcpy(info.underflow, private->underflow,
1125 sizeof(info.underflow));
1126 info.num_entries = private->number;
1127 info.size = private->size;
1128 strcpy(info.name, name);
1130 if (copy_to_user(user, &info, *len) != 0)
1138 ret = t ? PTR_ERR(t) : -ENOENT;
1139 #ifdef CONFIG_COMPAT
1141 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: after validating that the user buffer
 * length matches the header plus the size claimed in the request, copy
 * the named table's rules out via copy_entries_to_user(). */
1147 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1151 struct ip6t_get_entries get;
1154 if (*len < sizeof(get)) {
1155 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1158 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1160 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1161 duprintf("get_entries: %u != %zu\n",
1162 *len, sizeof(get) + get.size);
1166 t = xt_find_table_lock(net, AF_INET6, get.name);
1167 if (!IS_ERR_OR_NULL(t)) {
1168 struct xt_table_info *private = t->private;
1169 duprintf("t->private->number = %u\n", private->number);
/* Size must exactly match the live table, else report mismatch. */
1170 if (get.size == private->size)
1171 ret = copy_entries_to_user(private->size,
1172 t, uptr->entrytable);
1174 duprintf("get_entries: I've got %u not %u!\n",
1175 private->size, get.size);
1181 ret = t ? PTR_ERR(t) : -ENOENT;
/* Common table-replacement core for native and compat paths: swap
 * @newinfo in via xt_replace_table(), adjust module refcounts per the
 * rule-count change, snapshot the old table's counters, tear down the
 * old rules, and copy the final counters to userspace (failure there
 * is only logged — the new table is already live). */
1187 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1188 struct xt_table_info *newinfo, unsigned int num_counters,
1189 void __user *counters_ptr)
1193 struct xt_table_info *oldinfo;
1194 struct xt_counters *counters;
1195 const void *loc_cpu_old_entry;
1196 struct ip6t_entry *iter;
1199 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1205 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1206 "ip6table_%s", name);
1207 if (IS_ERR_OR_NULL(t)) {
1208 ret = t ? PTR_ERR(t) : -ENOENT;
1209 goto free_newinfo_counters_untrans;
/* The replacement must cover exactly the hooks this table owns. */
1213 if (valid_hooks != t->valid_hooks) {
1214 duprintf("Valid hook crap: %08X vs %08X\n",
1215 valid_hooks, t->valid_hooks);
1220 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1224 /* Update module usage count based on number of rules */
1225 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1226 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1227 if ((oldinfo->number > oldinfo->initial_entries) ||
1228 (newinfo->number <= oldinfo->initial_entries))
1230 if ((oldinfo->number > oldinfo->initial_entries) &&
1231 (newinfo->number <= oldinfo->initial_entries))
1234 /* Get the old counters, and synchronize with replace */
1235 get_counters(oldinfo, counters);
1237 /* Decrease module usage counts and free resource */
1238 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1239 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1240 cleanup_entry(iter, net);
1242 xt_free_table_info(oldinfo);
1243 if (copy_to_user(counters_ptr, counters,
1244 sizeof(struct xt_counters) * num_counters) != 0) {
1245 /* Silent error, can't fail, new table is already in place */
1246 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1255 free_newinfo_counters_untrans:
/* IP6T_SO_SET_REPLACE handler (native layout): copy the replace header
 * and blob from userspace, validate/translate via translate_table(),
 * then install with __do_replace(); on failure the translated rules
 * are torn down and the table info freed. */
1262 do_replace(struct net *net, const void __user *user, unsigned int len)
1265 struct ip6t_replace tmp;
1266 struct xt_table_info *newinfo;
1267 void *loc_cpu_entry;
1268 struct ip6t_entry *iter;
1270 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1273 /* overflow check */
1274 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1276 tmp.name[sizeof(tmp.name)-1] = 0;
1278 newinfo = xt_alloc_table_info(tmp.size);
1282 /* choose the copy that is on our node/cpu */
1283 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1284 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1290 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1294 duprintf("ip_tables: Translated table\n");
1296 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1297 tmp.num_counters, tmp.counters);
1299 goto free_newinfo_untrans;
1302 free_newinfo_untrans:
1303 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1304 cleanup_entry(iter, net);
1306 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: read an xt_counters_info header
 * (compat or native layout), validate the claimed counter count
 * against the total length, copy in the counter array, and add each
 * element to the matching rule's counters on the local CPU under the
 * write-recseq. */
1311 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1314 unsigned int i, curcpu;
1315 struct xt_counters_info tmp;
1316 struct xt_counters *paddc;
1317 unsigned int num_counters;
1322 const struct xt_table_info *private;
1324 const void *loc_cpu_entry;
1325 struct ip6t_entry *iter;
1326 unsigned int addend;
1327 #ifdef CONFIG_COMPAT
1328 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1332 size = sizeof(struct compat_xt_counters_info);
1337 size = sizeof(struct xt_counters_info);
1340 if (copy_from_user(ptmp, user, size) != 0)
1343 #ifdef CONFIG_COMPAT
1345 num_counters = compat_tmp.num_counters;
1346 name = compat_tmp.name;
1350 num_counters = tmp.num_counters;
/* Length must exactly cover header + declared counter array. */
1354 if (len != size + num_counters * sizeof(struct xt_counters))
1357 paddc = vmalloc(len - size);
1361 if (copy_from_user(paddc, user + size, len - size) != 0) {
1366 t = xt_find_table_lock(net, AF_INET6, name);
1367 if (IS_ERR_OR_NULL(t)) {
1368 ret = t ? PTR_ERR(t) : -ENOENT;
/* Counter array must be exactly one entry per rule. */
1374 private = t->private;
1375 if (private->number != num_counters) {
1377 goto unlock_up_free;
1381 /* Choose the copy that is on our node */
1382 curcpu = smp_processor_id();
1383 addend = xt_write_recseq_begin();
1384 loc_cpu_entry = private->entries[curcpu];
1385 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1386 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1389 xt_write_recseq_end(addend);
1401 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: pointers become
 * compat_uptr_t and the trailing entries use the compat entry type. */
1402 struct compat_ip6t_replace {
1403 char name[XT_TABLE_MAXNAMELEN];
1407 u32 hook_entry[NF_INET_NUMHOOKS];
1408 u32 underflow[NF_INET_NUMHOOKS];
1410 compat_uptr_t counters; /* struct xt_counters * */
1411 struct compat_ip6t_entry entries[0];
/* Serialize one native entry into the compat layout at *dstptr:
 * copy the fixed header (with the snapshot counters), convert each
 * match and the target via the xt compat helpers, then patch the
 * entry's target_offset/next_offset to account for the shrinkage
 * accumulated in *size. Advances *dstptr and decrements *size. */
1415 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1416 unsigned int *size, struct xt_counters *counters,
1419 struct xt_entry_target *t;
1420 struct compat_ip6t_entry __user *ce;
1421 u_int16_t target_offset, next_offset;
1422 compat_uint_t origsize;
1423 const struct xt_entry_match *ematch;
1427 ce = (struct compat_ip6t_entry __user *)*dstptr;
1428 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1429 copy_to_user(&ce->counters, &counters[i],
1430 sizeof(counters[i])) != 0)
1433 *dstptr += sizeof(struct compat_ip6t_entry);
1434 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1436 xt_ematch_foreach(ematch, e) {
1437 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the layout compressed so far. */
1441 target_offset = e->target_offset - (origsize - *size);
1442 t = ip6t_get_target(e);
1443 ret = xt_compat_target_to_user(t, dstptr, size);
1446 next_offset = e->next_offset - (origsize - *size);
1447 if (put_user(target_offset, &ce->target_offset) != 0 ||
1448 put_user(next_offset, &ce->next_offset) != 0)
/* Compat first pass for one match: resolve (and pin) the match module
 * and accumulate into *size the growth needed when converting this
 * match from compat to native layout. */
1454 compat_find_calc_match(struct xt_entry_match *m,
1456 const struct ip6t_ip6 *ipv6,
1457 unsigned int hookmask,
1460 struct xt_match *match;
1462 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1463 m->u.user.revision);
1464 if (IS_ERR(match)) {
1465 duprintf("compat_check_calc_match: `%s' not found\n",
1467 return PTR_ERR(match);
1469 m->u.kernel.match = match;
1470 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken while checking a compat entry
 * (one per match, plus the target).  Used on error unwind before the
 * entry's ->check hooks have run; entries past that point need the
 * heavier cleanup_entry() instead.
 */
1474 static void compat_release_entry(struct compat_ip6t_entry *e)
1476 struct xt_entry_target *t;
1477 struct xt_entry_match *ematch;
1479 /* Cleanup all matches */
1480 xt_ematch_foreach(ematch, e)
1481 module_put(ematch->u.kernel.match->me);
1482 t = compat_ip6t_get_target(e);
1483 module_put(t->u.kernel.target->me);
/*
 * Validate one compat-format rule entry from userspace: alignment, minimum
 * size, match/target lookup (taking module refs), and record the native-vs-
 * compat size delta with xt_compat_add_offset() so later passes can rebase
 * offsets.  Also latches hook entry/underflow positions that coincide with
 * this entry's offset.  On failure all acquired module refs are dropped.
 * NOTE(review): interior lines (ret/off/h declarations, -EINVAL returns,
 * labels) are elided from this excerpt.
 */
1487 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1488 struct xt_table_info *newinfo,
1490 const unsigned char *base,
1491 const unsigned char *limit,
1492 const unsigned int *hook_entries,
1493 const unsigned int *underflows,
1496 struct xt_entry_match *ematch;
1497 struct xt_entry_target *t;
1498 struct xt_target *target;
1499 unsigned int entry_offset;
1503 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and its fixed header must fit before
 * the end of the user-supplied blob. */
1504 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1505 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1506 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry header plus a target.
 * NOTE(review): no upper-bound check of e->next_offset against limit is
 * visible here; later upstream kernels hardened this path — confirm this
 * tree carries those bounds checks. */
1510 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1511 sizeof(struct compat_xt_entry_target)) {
1512 duprintf("checking: element %p size %u\n",
1517 /* For purposes of check_entry casting the compat entry is fine */
1518 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native entry will be. */
1522 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1523 entry_offset = (void *)e - (void *)base;
1525 xt_ematch_foreach(ematch, e) {
1526 ret = compat_find_calc_match(ematch, name,
1527 &e->ipv6, e->comefrom, &off);
1529 goto release_matches;
/* Resolve the target (may autoload its module). */
1533 t = compat_ip6t_get_target(e);
1534 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1535 t->u.user.revision);
1536 if (IS_ERR(target)) {
1537 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1539 ret = PTR_ERR(target);
1540 goto release_matches;
1542 t->u.kernel.target = target;
1544 off += xt_compat_target_offset(target);
/* Record this entry's offset delta for the rebasing pass. */
1546 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1550 /* Check hooks & underflows */
1551 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1552 if ((unsigned char *)e - base == hook_entries[h])
1553 newinfo->hook_entry[h] = hook_entries[h];
1554 if ((unsigned char *)e - base == underflows[h])
1555 newinfo->underflow[h] = underflows[h];
1558 /* Clear counters and comefrom */
1559 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then every match ref taken above. */
1564 module_put(t->u.kernel.target->me);
1566 xt_ematch_foreach(ematch, e) {
1569 module_put(ematch->u.kernel.match->me);
/*
 * Convert one already-validated compat entry into kernel (native) format at
 * *dstptr, growing *size by the native/compat delta and rebasing the
 * entry-internal offsets plus any hook entry/underflow offsets in newinfo
 * that lie beyond this entry.
 * NOTE(review): interior lines (ret/h declarations, origsize init, return)
 * are elided from this excerpt.
 */
1575 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1576 unsigned int *size, const char *name,
1577 struct xt_table_info *newinfo, unsigned char *base)
1579 struct xt_entry_target *t;
1580 struct ip6t_entry *de;
1581 unsigned int origsize;
1583 struct xt_entry_match *ematch;
/* Lay down the native header, then the counters, at the destination. */
1587 de = (struct ip6t_entry *)*dstptr;
1588 memcpy(de, e, sizeof(struct ip6t_entry));
1589 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1591 *dstptr += sizeof(struct ip6t_entry);
1592 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Expand each match into native form at the advancing cursor. */
1594 xt_ematch_foreach(ematch, e) {
1595 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Rebase intra-entry offsets by the total growth so far. */
1599 de->target_offset = e->target_offset - (origsize - *size);
1600 t = compat_ip6t_get_target(e);
1601 xt_compat_target_from_user(t, dstptr, size);
1603 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook/underflow that points past this entry also shifts. */
1604 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1605 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1606 newinfo->hook_entry[h] -= origsize - *size;
1607 if ((unsigned char *)de - base < newinfo->underflow[h])
1608 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the real ->checkentry hooks on a rule that has been converted from
 * compat to native form: each match via check_match(), then the target via
 * check_target().  On failure, matches checked so far are unwound with
 * cleanup_match().
 * NOTE(review): some lines (ret/j declarations, return, loop guards) are
 * elided from this excerpt.
 */
1613 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1618 struct xt_mtchk_param mtpar;
1619 struct xt_entry_match *ematch;
/* Parameters shared by every match check for this rule. */
1624 mtpar.entryinfo = &e->ipv6;
1625 mtpar.hook_mask = e->comefrom;
1626 mtpar.family = NFPROTO_IPV6;
1627 xt_ematch_foreach(ematch, e) {
1628 ret = check_match(ematch, &mtpar);
1630 goto cleanup_matches;
1634 ret = check_target(e, net, name);
1636 goto cleanup_matches;
/* Unwind: destroy only the matches whose ->check already ran. */
1640 xt_ematch_foreach(ematch, e) {
1643 cleanup_match(ematch, net);
/*
 * Full pipeline for installing a table supplied by 32-bit userspace:
 *  1) size/hook validation of every compat entry (under xt_compat_lock),
 *  2) allocation of a native-format xt_table_info and entry-by-entry
 *     conversion with offset rebasing,
 *  3) mark_source_chains() loop detection and per-entry ->check hooks,
 *  4) replication of the entry blob to every possible CPU.
 * On success *pinfo/*pentry0 are swapped to the new table and the old info
 * is freed; all error paths release module refs and allocations.
 * NOTE(review): numerous interior lines (error labels, frees, returns) are
 * elided from this excerpt.
 */
1649 translate_compat_table(struct net *net,
1651 unsigned int valid_hooks,
1652 struct xt_table_info **pinfo,
1654 unsigned int total_size,
1655 unsigned int number,
1656 unsigned int *hook_entries,
1657 unsigned int *underflows)
1660 struct xt_table_info *newinfo, *info;
1661 void *pos, *entry0, *entry1;
1662 struct compat_ip6t_entry *iter0;
1663 struct ip6t_entry *iter1;
1670 info->number = number;
1672 /* Init all hooks to impossible value. */
1673 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1674 info->hook_entry[i] = 0xFFFFFFFF;
1675 info->underflow[i] = 0xFFFFFFFF;
1678 duprintf("translate_compat_table: size %u\n", info->size);
/* Pass 1: validate sizes/hooks and collect compat->native offset deltas;
 * the per-family compat state is protected by xt_compat_lock. */
1680 xt_compat_lock(AF_INET6);
1681 xt_compat_init_offsets(AF_INET6, number);
1682 /* Walk through entries, checking offsets. */
1683 xt_entry_foreach(iter0, entry0, total_size) {
1684 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1686 entry0 + total_size,
1697 duprintf("translate_compat_table: %u not %u entries\n",
1702 /* Check hooks all assigned */
1703 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1704 /* Only hooks which are valid */
1705 if (!(valid_hooks & (1 << i)))
1707 if (info->hook_entry[i] == 0xFFFFFFFF) {
1708 duprintf("Invalid hook entry %u %u\n",
1709 i, hook_entries[i]);
1712 if (info->underflow[i] == 0xFFFFFFFF) {
1713 duprintf("Invalid underflow %u %u\n",
/* Pass 2: build the native table and convert every entry into it. */
1720 newinfo = xt_alloc_table_info(size);
1724 newinfo->number = number;
1725 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1726 newinfo->hook_entry[i] = info->hook_entry[i];
1727 newinfo->underflow[i] = info->underflow[i];
/* Convert into this CPU's copy first; replicated to others below. */
1729 entry1 = newinfo->entries[raw_smp_processor_id()];
1732 xt_entry_foreach(iter0, entry0, total_size) {
1733 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1734 name, newinfo, entry1);
/* Offset bookkeeping no longer needed once conversion is done. */
1738 xt_compat_flush_offsets(AF_INET6);
1739 xt_compat_unlock(AF_INET6);
/* Pass 3: chain-loop detection, then per-entry ->check hooks. */
1744 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1748 xt_entry_foreach(iter1, entry1, newinfo->size) {
1749 ret = compat_check_entry(iter1, net, name);
/* ERROR targets bound the jumpstack depth needed at run time. */
1753 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1754 XT_ERROR_TARGET) == 0)
1755 ++newinfo->stacksize;
1759 * The first i matches need cleanup_entry (calls ->destroy)
1760 * because they had called ->check already. The other j-i
1761 * entries need only release.
1765 xt_entry_foreach(iter0, entry0, newinfo->size) {
1770 compat_release_entry(iter0);
1772 xt_entry_foreach(iter1, entry1, newinfo->size) {
1775 cleanup_entry(iter1, net);
1777 xt_free_table_info(newinfo);
1781 /* And one copy for every other CPU */
1782 for_each_possible_cpu(i)
1783 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1784 memcpy(newinfo->entries[i], entry1, newinfo->size);
/* Success: hand back newinfo, free the bookkeeping info struct. */
1788 xt_free_table_info(info);
/* Error unwind: free the half-built table and release compat refs. */
1792 xt_free_table_info(newinfo);
1794 xt_entry_foreach(iter0, entry0, total_size) {
1797 compat_release_entry(iter0);
1801 xt_compat_flush_offsets(AF_INET6);
1802 xt_compat_unlock(AF_INET6);
/*
 * Handle IP6T_SO_SET_REPLACE from a 32-bit process: copy in the compat
 * replace header and entry blob, translate it to native format, then swap
 * it in via __do_replace().  On a failed swap, converted entries are torn
 * down with cleanup_entry().
 * NOTE(review): interior lines (ret declaration, -ENOMEM/-EFAULT paths,
 * returns) are elided from this excerpt.
 */
1807 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1810 struct compat_ip6t_replace tmp;
1811 struct xt_table_info *newinfo;
1812 void *loc_cpu_entry;
1813 struct ip6t_entry *iter;
1815 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1818 /* overflow check */
/* Reject sizes that would overflow the per-CPU and counter allocations.
 * NOTE(review): no visible rejection of num_counters == 0 here; later
 * upstream kernels added one — confirm for this tree. */
1819 if (tmp.size >= INT_MAX / num_possible_cpus())
1821 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Defensive NUL-termination of the (userspace-supplied) table name. */
1823 tmp.name[sizeof(tmp.name)-1] = 0;
1825 newinfo = xt_alloc_table_info(tmp.size);
1829 /* choose the copy that is on our node/cpu */
1830 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
/* Pull the entry blob that follows the header in the user buffer. */
1831 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1837 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1838 &newinfo, &loc_cpu_entry, tmp.size,
1839 tmp.num_entries, tmp.hook_entry,
1844 duprintf("compat_do_replace: Translated table\n");
/* Atomically swap the table in; counters pointer is a compat_uptr_t. */
1846 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1847 tmp.num_counters, compat_ptr(tmp.counters));
1849 goto free_newinfo_untrans;
1852 free_newinfo_untrans:
1853 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1854 cleanup_entry(iter, net);
1856 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes REPLACE to the compat translation path and
 * ADD_COUNTERS to do_add_counters() with compat=1.
 */
1861 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1866 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1870 case IP6T_SO_SET_REPLACE:
1871 ret = compat_do_replace(sock_net(sk), user, len);
1874 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 1 selects the compat counter layout. */
1875 ret = do_add_counters(sock_net(sk), user, len, 1);
1879 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Compat layout of the IP6T_SO_GET_ENTRIES request/reply: table name plus
 * a trailing variable-length array of compat entries.
 * NOTE(review): the size member is elided from this excerpt.
 */
1886 struct compat_ip6t_get_entries {
1887 char name[XT_TABLE_MAXNAMELEN];
1889 struct compat_ip6t_entry entrytable[0];
/*
 * Dump an entire table to 32-bit userspace: snapshot the counters, then
 * walk the entry blob converting each rule with compat_copy_entry_to_user().
 * NOTE(review): pos/size/i/ret declarations, counter vfree and return are
 * elided from this excerpt.
 */
1893 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1894 void __user *userptr)
1896 struct xt_counters *counters;
1897 const struct xt_table_info *private = table->private;
1901 const void *loc_cpu_entry;
1903 struct ip6t_entry *iter;
/* alloc_counters() aggregates the per-CPU counters into one array. */
1905 counters = alloc_counters(table);
1906 if (IS_ERR(counters))
1907 return PTR_ERR(counters);
1909 /* choose the copy that is on our node/cpu, ...
1910 * This choice is lazy (because current thread is
1911 * allowed to migrate to another cpu)
1913 loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* i indexes counters[] in step with the entries being emitted. */
1916 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1917 ret = compat_copy_entry_to_user(iter, &pos,
1918 &size, counters, i++);
/*
 * Handle IP6T_SO_GET_ENTRIES from a 32-bit process: validate the request
 * length against the header + blob size, look up the table, and — if the
 * user's size matches the compat-translated table size — dump the entries.
 * The whole lookup/dump runs under xt_compat_lock(AF_INET6).
 * NOTE(review): ret declaration, -EINVAL/-EFAULT returns and module_put
 * are elided from this excerpt.
 */
1928 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1932 struct compat_ip6t_get_entries get;
1935 if (*len < sizeof(get)) {
1936 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1940 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* The caller must size the buffer exactly: header plus get.size blob. */
1943 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1944 duprintf("compat_get_entries: %u != %zu\n",
1945 *len, sizeof(get) + get.size);
1949 xt_compat_lock(AF_INET6);
1950 t = xt_find_table_lock(net, AF_INET6, get.name);
1951 if (!IS_ERR_OR_NULL(t)) {
1952 const struct xt_table_info *private = t->private;
1953 struct xt_table_info info;
1954 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the table's size in compat layout. */
1955 ret = compat_table_info(private, &info);
1956 if (!ret && get.size == info.size) {
1957 ret = compat_copy_entries_to_user(private->size,
1958 t, uptr->entrytable);
1960 duprintf("compat_get_entries: I've got %u not %u!\n",
1961 private->size, get.size);
1964 xt_compat_flush_offsets(AF_INET6);
/* Table missing or lookup failed: propagate PTR_ERR or -ENOENT. */
1968 ret = t ? PTR_ERR(t) : -ENOENT;
1970 xt_compat_unlock(AF_INET6);
/* Forward declaration: the native getter is the fallback for compat cmds. */
1974 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO and
 * GET_ENTRIES take compat-specific paths, everything else falls through to
 * the native handler (whose replies are layout-compatible).
 */
1977 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1981 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1985 case IP6T_SO_GET_INFO:
/* Final argument 1 selects the compat reply layout. */
1986 ret = get_info(sock_net(sk), user, len, 1);
1988 case IP6T_SO_GET_ENTRIES:
1989 ret = compat_get_entries(sock_net(sk), user, len);
1992 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN required, then REPLACE and
 * ADD_COUNTERS are routed to their handlers (compat=0 for counters).
 */
1999 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2003 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2007 case IP6T_SO_SET_REPLACE:
2008 ret = do_replace(sock_net(sk), user, len);
2011 case IP6T_SO_SET_ADD_COUNTERS:
/* Final argument 0 selects the native counter layout. */
2012 ret = do_add_counters(sock_net(sk), user, len, 0);
2016 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO, GET_ENTRIES, and revision
 * queries for matches/targets (which may autoload an ip6t_<name> module
 * via try_then_request_module()).
 * NOTE(review): ret declaration, some break/return paths and the
 * revision-query result handling are elided from this excerpt.
 */
2024 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2028 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2032 case IP6T_SO_GET_INFO:
2033 ret = get_info(sock_net(sk), user, len, 0);
2036 case IP6T_SO_GET_ENTRIES:
2037 ret = get_entries(sock_net(sk), user, len);
2040 case IP6T_SO_GET_REVISION_MATCH:
2041 case IP6T_SO_GET_REVISION_TARGET: {
2042 struct xt_get_revision rev;
2045 if (*len != sizeof(rev)) {
2049 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Defensive NUL-termination of the userspace-supplied name. */
2053 rev.name[sizeof(rev.name)-1] = 0;
2055 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Look up the revision; on failure request_module("ip6t_<name>") and
 * retry once. */
2060 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2063 "ip6t_%s", rev.name);
2068 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6tables table for one netns: allocate an xt_table_info
 * sized for the initial ruleset, copy and translate the template rules,
 * then register with the xt core.  Returns the new xt_table or ERR_PTR.
 * NOTE(review): ret declaration, -ENOMEM path and success return are
 * elided from this excerpt.
 */
2075 struct xt_table *ip6t_register_table(struct net *net,
2076 const struct xt_table *table,
2077 const struct ip6t_replace *repl)
2080 struct xt_table_info *newinfo;
/* bootstrap is a throwaway xt_table_info handed to xt_register_table. */
2081 struct xt_table_info bootstrap = {0};
2082 void *loc_cpu_entry;
2083 struct xt_table *new_table;
2085 newinfo = xt_alloc_table_info(repl->size);
2091 /* choose the copy on our node/cpu, but dont care about preemption */
2092 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2093 memcpy(loc_cpu_entry, repl->entries, repl->size);
/* Validate/translate the template ruleset into runtime form. */
2095 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2099 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2100 if (IS_ERR(new_table)) {
2101 ret = PTR_ERR(new_table);
/* Error path: release the allocated table info and report via ERR_PTR. */
2107 xt_free_table_info(newinfo);
2109 return ERR_PTR(ret);
/*
 * Tear down a table for one netns: unregister it from the xt core, run
 * cleanup_entry() on every rule (dropping match/target module refs), drop
 * the table-owner module ref taken for non-initial rules, and free the
 * table info.
 */
2112 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2114 struct xt_table_info *private;
2115 void *loc_cpu_entry;
/* Grab the owner before the table struct may become unreachable. */
2116 struct module *table_owner = table->me;
2117 struct ip6t_entry *iter;
2119 private = xt_unregister_table(table);
2121 /* Decrease module usage counts and free resources */
2122 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2123 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2124 cleanup_entry(iter, net);
/* Extra rules beyond the built-in ones pinned the owner module. */
2125 if (private->number > private->initial_entries)
2126 module_put(table_owner);
2127 xt_free_table_info(private);
2130 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2132 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2133 u_int8_t type, u_int8_t code,
/* Match when type equals test_type and code falls in [min_code, max_code];
 * the (elided) invert flag XORs the result. */
2136 return (type == test_type && code >= min_code && code <= max_code)
/*
 * Built-in "icmp6" match: compares the ICMPv6 type/code of the packet
 * against the range configured in ip6t_icmp.  Non-first fragments cannot
 * be inspected; truncated headers hot-drop the packet.
 */
2141 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2143 const struct icmp6hdr *ic;
2144 struct icmp6hdr _icmph;
2145 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2147 /* Must not be a fragment. */
2148 if (par->fragoff != 0)
/* Linearize-safe read of the ICMPv6 header at the transport offset. */
2151 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2153 /* We've been asked to examine this packet, and we
2154 * can't. Hence, no choice but to drop.
2156 duprintf("Dropping evil ICMP tinygram.\n");
2157 par->hotdrop = true;
2161 return icmp6_type_code_match(icmpinfo->type,
2164 ic->icmp6_type, ic->icmp6_code,
2165 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2168 /* Called when user tries to insert an entry of this type. */
2169 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2171 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2173 /* Must specify no unknown invflags */
/* Only IP6T_ICMP_INV may be set; any other inversion bit is -EINVAL. */
2174 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2177 /* The built-in targets: standard (NULL) and error. */
2178 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
/* STANDARD: verdict is an int stored in targetsize; no target function —
 * handled specially by the traversal code. */
2180 .name = XT_STANDARD_TARGET,
2181 .targetsize = sizeof(int),
2182 .family = NFPROTO_IPV6,
2183 #ifdef CONFIG_COMPAT
/* 32-bit userspace stores the verdict as compat_int_t. */
2184 .compatsize = sizeof(compat_int_t),
2185 .compat_from_user = compat_standard_from_user,
2186 .compat_to_user = compat_standard_to_user,
/* ERROR: carries an error-name string; hitting it logs and drops. */
2190 .name = XT_ERROR_TARGET,
2191 .target = ip6t_error,
2192 .targetsize = XT_FUNCTION_MAXNAMELEN,
2193 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: routes the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the dispatchers above, with separate compat entry points for
 * 32-bit callers when CONFIG_COMPAT is enabled.
 */
2197 static struct nf_sockopt_ops ip6t_sockopts = {
2199 .set_optmin = IP6T_BASE_CTL,
2200 .set_optmax = IP6T_SO_SET_MAX+1,
2201 .set = do_ip6t_set_ctl,
2202 #ifdef CONFIG_COMPAT
2203 .compat_set = compat_do_ip6t_set_ctl,
2205 .get_optmin = IP6T_BASE_CTL,
2206 .get_optmax = IP6T_SO_GET_MAX+1,
2207 .get = do_ip6t_get_ctl,
2208 #ifdef CONFIG_COMPAT
2209 .compat_get = compat_do_ip6t_get_ctl,
2211 .owner = THIS_MODULE,
/* Built-in match table: the "icmp6" match defined above. */
2214 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2217 .match = icmp6_match,
2218 .matchsize = sizeof(struct ip6t_icmp),
2219 .checkentry = icmp6_checkentry,
/* Restricted to ICMPv6 packets by the proto field. */
2220 .proto = IPPROTO_ICMPV6,
2221 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 xt proto state (proc entries etc.). */
2225 static int __net_init ip6_tables_net_init(struct net *net)
2227 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown counterpart of ip6_tables_net_init(). */
2230 static void __net_exit ip6_tables_net_exit(struct net *net)
2232 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks run for every network namespace as it is created/destroyed. */
2235 static struct pernet_operations ip6_tables_net_ops = {
2236 .init = ip6_tables_net_init,
2237 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, the built-in targets and matches, and
 * the sockopt handlers — unwinding each earlier step if a later one fails.
 * NOTE(review): the error-label lines between the registrations are elided
 * from this excerpt.
 */
2240 static int __init ip6_tables_init(void)
2244 ret = register_pernet_subsys(&ip6_tables_net_ops);
2248 /* No one else will be downing sem now, so we won't sleep */
2249 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2252 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2256 /* Register setsockopt */
2257 ret = nf_register_sockopt(&ip6t_sockopts);
2261 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse registration order. */
2265 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2267 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2269 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: undo ip6_tables_init() registrations in reverse order. */
2274 static void __exit ip6_tables_fini(void)
2276 nf_unregister_sockopt(&ip6t_sockopts);
2278 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2279 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2280 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Entry points exported to the per-table modules (ip6table_filter etc.). */
2283 EXPORT_SYMBOL(ip6t_register_table);
2284 EXPORT_SYMBOL(ip6t_unregister_table);
2285 EXPORT_SYMBOL(ip6t_do_table);
/* Module load/unload hooks. */
2287 module_init(ip6_tables_init);
2288 module_exit(ip6_tables_fini);