2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
67 void *ip6t_alloc_initial_table(const struct xt_table *info)
69 return xt_alloc_initial_table(ip6t, IP6T);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
/*
 * NOTE(review): extraction-damaged fragment — interior source lines
 * (parameter lines 86-87, braces, `return false/true` statements, the
 * opening `/​*` of the debug block at original lines 102-108) are missing.
 * Code below is kept byte-identical; only comments were added.
 *
 * Matches one rule's IPv6 header spec (addresses, in/out interface,
 * protocol) against skb; FWINV applies the rule's inversion flags.
 */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff *skb,
88 const struct ip6t_ip6 *ip6info,
89 unsigned int *protoff,
90 int *fragoff, bool *hotdrop)
93 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* XOR the raw comparison with the rule's inversion bit. */
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
/* Masked source/destination address comparison, honoring IP6T_INV_*. */
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
98 &ip6info->src), IP6T_INV_SRCIP) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
100 &ip6info->dst), IP6T_INV_DSTIP)) {
101 dprintf("Source or dest mismatch.\n");
/* Lines below (orig 103-108) were inside a commented-out IPv4-style
 * debug block — note the stray trailing `*​/` and the ipv4 field names. */
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Input interface name comparison (mask allows wildcard suffix). */
112 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
114 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev, ip6info->iniface,
117 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
/* Output interface name comparison. */
121 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
123 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev, ip6info->outiface,
126 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info->flags & IP6T_F_PROTO)) {
135 unsigned short _frag_off;
/* Walks the extension-header chain; also reports fragment offset. */
137 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
143 *fragoff = _frag_off;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
150 if (ip6info->proto == protohdr) {
151 if(ip6info->invflags & IP6T_INV_PROTO) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info->proto != 0) &&
159 !(ip6info->invflags & IP6T_INV_PROTO))
/*
 * NOTE(review): extraction-damaged fragment — the return-type line and the
 * `return` statements are among the missing lines; kept byte-identical.
 * Validates a rule's ip6 header spec: rejects any flag or inversion-flag
 * bits outside IP6T_F_MASK / IP6T_INV_MASK.
 */
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
/*
 * NOTE(review): fragment — return-type line, braces, and the final return
 * are missing. Target handler for ERROR rules: rate-limited log of the
 * chain name stored in targinfo (presumably returns NF_DROP — confirm
 * against the original source).
 */
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Return the rule located @offset bytes into the table blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
200 static const struct ip6t_ip6 uncond;
202 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified wrapper around ip6t_get_target() for read-only callers. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
/*
 * NOTE(review): fragment — the closing `};` of each table and the
 * `.u = { .log = { ... } }` interior of trace_loginfo are missing;
 * kept byte-identical. Lookup tables used by the TRACE target logging
 * path below (hook names, rule-comment strings, log parameters).
 */
211 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
212 /* This cries for unification! */
213 static const char *const hooknames[] = {
214 [NF_INET_PRE_ROUTING] = "PREROUTING",
215 [NF_INET_LOCAL_IN] = "INPUT",
216 [NF_INET_FORWARD] = "FORWARD",
217 [NF_INET_LOCAL_OUT] = "OUTPUT",
218 [NF_INET_POST_ROUTING] = "POSTROUTING",
221 enum nf_ip_trace_comments {
222 NF_IP6_TRACE_COMMENT_RULE,
223 NF_IP6_TRACE_COMMENT_RETURN,
224 NF_IP6_TRACE_COMMENT_POLICY,
227 static const char *const comments[] = {
228 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
229 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
230 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
233 static struct nf_loginfo trace_loginfo = {
234 .type = NF_LOG_TYPE_LOG,
238 .logflags = NF_LOG_MASK,
/*
 * NOTE(review): fragment — return-type line, braces, and the rulenum
 * bookkeeping lines are missing; kept byte-identical. Helper for
 * trace_packet(): walking rules from s toward e, it tracks the current
 * chain name (ERROR target = head of a user chain) and classifies the
 * rule as rule/return/policy for the trace message.
 */
243 /* Mildly perf critical (only if packet tracing is on) */
245 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
246 const char *hookname, const char **chainname,
247 const char **comment, unsigned int *rulenum)
249 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
251 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
252 /* Head of user chain: ERROR target with chainname */
253 *chainname = t->target.data;
258 if (s->target_offset == sizeof(struct ip6t_entry) &&
259 strcmp(t->target.u.kernel.target->name,
260 XT_STANDARD_TARGET) == 0 &&
262 unconditional(&s->ipv6)) {
263 /* Tail of chains: STANDARD target (return/policy) */
/* A chain tail in the base chain is the policy; elsewhere it's a return. */
264 *comment = *chainname == hookname
265 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
266 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * NOTE(review): fragment — the `unsigned int hook` parameter line and
 * braces are missing; kept byte-identical. Emits a
 * "TRACE: table:chain:comment:rulenum" log line for a traced packet by
 * scanning the ruleset from the hook entry point up to the matched rule e.
 */
275 static void trace_packet(const struct sk_buff *skb,
277 const struct net_device *in,
278 const struct net_device *out,
279 const char *tablename,
280 const struct xt_table_info *private,
281 const struct ip6t_entry *e)
283 const void *table_base;
284 const struct ip6t_entry *root;
285 const char *hookname, *chainname, *comment;
286 const struct ip6t_entry *iter;
287 unsigned int rulenum = 0;
288 struct net *net = dev_net(in ? in : out);
290 table_base = private->entries[smp_processor_id()];
291 root = get_entry(table_base, private->hook_entry[hook]);
293 hookname = chainname = hooknames[hook];
294 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
/* Walk rules until get_chainname_rulenum reports we reached e. */
296 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
297 if (get_chainname_rulenum(iter, e, hookname,
298 &chainname, &comment, &rulenum) != 0)
301 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
302 "TRACE: %s:%s:%s:%u ",
303 tablename, chainname, comment, rulenum);
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
310 return (void *)entry + entry->next_offset;
/*
 * NOTE(review): extraction-damaged fragment — the signature's hook
 * parameter, local declarations (addend, origptr), loop braces, and
 * several control-flow lines are missing; kept byte-identical.
 *
 * Core packet-filter walk: starting at the hook's entry point, match
 * each rule (header spec, then extension matches), update per-rule
 * counters, and execute the target. STANDARD targets encode absolute
 * verdicts as negative numbers and jumps/returns via a per-CPU
 * jumpstack; the walk ends on an absolute verdict or hotdrop.
 */
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ip6t_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int *stackptr, origptr, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
333 indev = in ? in->name : nulldevname;
334 outdev = out ? out->name : nulldevname;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar.hotdrop = false;
344 acpar.family = NFPROTO_IPV6;
345 acpar.hooknum = hook;
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Lockless counter update protocol: per-CPU recseq + per-CPU tables. */
350 addend = xt_write_recseq_begin();
351 private = table->private;
352 cpu = smp_processor_id();
353 table_base = private->entries[cpu];
354 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
355 stackptr = per_cpu_ptr(private->stackptr, cpu);
358 e = get_entry(table_base, private->hook_entry[hook]);
361 const struct xt_entry_target *t;
362 const struct xt_entry_match *ematch;
/* Header mismatch: skip to the next rule. */
366 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
367 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
369 e = ip6t_next_entry(e);
/* Run every extension match; any failure skips the rule. */
373 xt_ematch_foreach(ematch, e) {
374 acpar.match = ematch->u.kernel.match;
375 acpar.matchinfo = ematch->data;
376 if (!acpar.match->match(skb, &acpar))
380 ADD_COUNTER(e->counters, skb->len, 1);
382 t = ip6t_get_target_c(e);
383 IP_NF_ASSERT(t->u.kernel.target);
385 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
386 /* The packet is traced: log it */
387 if (unlikely(skb->nf_trace))
388 trace_packet(skb, hook, in, out,
389 table->name, private, e);
391 /* Standard target? */
392 if (!t->u.kernel.target->target) {
395 v = ((struct xt_standard_target *)t)->verdict;
397 /* Pop from stack? */
398 if (v != XT_RETURN) {
/* Negative verdicts encode absolute results: -v - 1 == NF_* code. */
399 verdict = (unsigned int)(-v) - 1;
402 if (*stackptr <= origptr)
403 e = get_entry(table_base,
404 private->underflow[hook]);
406 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* A real jump (not a goto, not a fallthrough): push return address. */
409 if (table_base + v != ip6t_next_entry(e) &&
410 !(e->ipv6.flags & IP6T_F_GOTO)) {
411 if (*stackptr >= private->stacksize) {
415 jumpstack[(*stackptr)++] = e;
418 e = get_entry(table_base, v);
/* Non-standard target: invoke its handler. */
422 acpar.target = t->u.kernel.target;
423 acpar.targinfo = t->data;
425 verdict = t->u.kernel.target->target(skb, &acpar);
426 if (verdict == XT_CONTINUE)
427 e = ip6t_next_entry(e);
431 } while (!acpar.hotdrop);
435 xt_write_recseq_end(addend);
438 #ifdef DEBUG_ALLOW_ALL
/*
 * NOTE(review): extraction-damaged fragment — loop headers, braces, and
 * several backtracking lines are missing; kept byte-identical.
 *
 * Ruleset verification pass: depth-first walk from every valid hook,
 * recording in each rule's comefrom which hooks can reach it. Reuses
 * counters.pcnt as the back-pointer stack (restored to 0 on the way
 * out). Revisiting a rule still marked in-progress (bit
 * NF_INET_NUMHOOKS) means a loop — reported and rejected.
 */
447 /* Figures out from what hook each rule can be called: returns 0 if
448 there are loops. Puts hook bitmask in comefrom. */
450 mark_source_chains(const struct xt_table_info *newinfo,
451 unsigned int valid_hooks, void *entry0)
455 /* No recursion; use packet counter to save back ptrs (reset
456 to 0 as we leave), and comefrom to save source hook bitmask */
457 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
458 unsigned int pos = newinfo->hook_entry[hook];
459 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
461 if (!(valid_hooks & (1 << hook)))
464 /* Set initial back pointer. */
465 e->counters.pcnt = pos;
468 const struct xt_standard_target *t
469 = (void *)ip6t_get_target_c(e);
470 int visited = e->comefrom & (1 << hook);
/* In-progress marker still set: we came back to ourselves — loop. */
472 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
473 pr_err("iptables: loop hook %u pos %u %08X.\n",
474 hook, pos, e->comefrom);
477 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
479 /* Unconditional return/END. */
480 if ((e->target_offset == sizeof(struct ip6t_entry) &&
481 (strcmp(t->target.u.user.name,
482 XT_STANDARD_TARGET) == 0) &&
484 unconditional(&e->ipv6)) || visited) {
485 unsigned int oldpos, size;
486
487 if ((strcmp(t->target.u.user.name,
488 XT_STANDARD_TARGET) == 0) &&
489 t->verdict < -NF_MAX_VERDICT - 1) {
490 duprintf("mark_source_chains: bad "
491 "negative verdict (%i)\n",
496 /* Return: backtrack through the last
499 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
500 #ifdef DEBUG_IP_FIREWALL_USER
502 & (1 << NF_INET_NUMHOOKS)) {
503 duprintf("Back unset "
/* Pop the saved back-pointer and clear it. */
510 pos = e->counters.pcnt;
511 e->counters.pcnt = 0;
513 /* We're at the start. */
517 e = (struct ip6t_entry *)
519 } while (oldpos == pos + e->next_offset);
522 size = e->next_offset;
523 e = (struct ip6t_entry *)
524 (entry0 + pos + size);
525 e->counters.pcnt = pos;
/* Conditional rule or non-standard target: descend. */
528 int newpos = t->verdict;
530 if (strcmp(t->target.u.user.name,
531 XT_STANDARD_TARGET) == 0 &&
533 if (newpos > newinfo->size -
534 sizeof(struct ip6t_entry)) {
535 duprintf("mark_source_chains: "
536 "bad verdict (%i)\n",
540 /* This a jump; chase it. */
541 duprintf("Jump rule %u -> %u\n",
544 /* ... this is a fallthru */
545 newpos = pos + e->next_offset;
547 e = (struct ip6t_entry *)
549 e->counters.pcnt = pos;
554 duprintf("Finished chain %u\n", hook);
/*
 * NOTE(review): fragment — braces and the `par.net = net;` style setup
 * line appear to be among the missing lines; kept byte-identical.
 * Runs a match extension's destructor (if any) and releases its module
 * reference.
 */
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
561 struct xt_mtdtor_param par;
564 par.match = m->u.kernel.match;
565 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV6;
567 if (par.match->destroy != NULL)
568 par.match->destroy(&par);
569 module_put(par.match->me);
/*
 * NOTE(review): fragment — return-type line, braces, and the error
 * returns are missing; kept byte-identical. Sanity-checks one rule:
 * valid ip6 spec, and target record fits between target_offset and
 * next_offset.
 */
573 check_entry(const struct ip6t_entry *e, const char *name)
575 const struct xt_entry_target *t;
577 if (!ip6_checkentry(&e->ipv6)) {
578 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Target record must not overrun the entry. */
582 if (e->target_offset + sizeof(struct xt_entry_target) >
586 t = ip6t_get_target_c(e);
587 if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * NOTE(review): fragment — braces, `int ret;`, and the return lines are
 * missing; kept byte-identical. Validates a match extension's
 * parameters via the generic xt_check_match() (size, protocol,
 * inversion flags).
 */
593 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
595 const struct ip6t_ip6 *ipv6 = par->entryinfo;
598 par->match = m->u.kernel.match;
599 par->matchinfo = m->data;
601 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
602 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
604 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * NOTE(review): fragment — return-type line, braces, and the error-path
 * label are missing; kept byte-identical. Looks up (and module-gets)
 * the match extension by name/revision, then validates it with
 * check_match(); the module_put at the bottom is presumably the error
 * path — confirm against the original source.
 */
612 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
614 struct xt_match *match;
617 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
620 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
621 return PTR_ERR(match);
623 m->u.kernel.match = match;
625 ret = check_match(m, par);
631 module_put(m->u.kernel.match->me);
/*
 * NOTE(review): fragment — braces, remaining designated initializers
 * (.net/.table/.entryinfo per the xt_tgchk_param layout — confirm), and
 * return lines are missing; kept byte-identical. Validates a target
 * extension's parameters via the generic xt_check_target().
 */
635 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
637 struct xt_entry_target *t = ip6t_get_target(e);
638 struct xt_tgchk_param par = {
642 .target = t->u.kernel.target,
644 .hook_mask = e->comefrom,
645 .family = NFPROTO_IPV6,
649 t = ip6t_get_target(e);
650 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
651 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
653 duprintf("ip_tables: check failed for `%s'.\n",
654 t->u.kernel.target->name);
/*
 * NOTE(review): fragment — return-type line, braces, and parts of the
 * cleanup path are missing; kept byte-identical. Full per-rule setup:
 * sanity check, resolve+check every match extension, resolve+check the
 * target; on any failure unwinds via cleanup_match() on the matches
 * already set up (goto-cleanup pattern).
 */
661 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
664 struct xt_entry_target *t;
665 struct xt_target *target;
668 struct xt_mtchk_param mtpar;
669 struct xt_entry_match *ematch;
671 ret = check_entry(e, name);
678 mtpar.entryinfo = &e->ipv6;
679 mtpar.hook_mask = e->comefrom;
680 mtpar.family = NFPROTO_IPV6;
681 xt_ematch_foreach(ematch, e) {
682 ret = find_check_match(ematch, &mtpar);
684 goto cleanup_matches;
688 t = ip6t_get_target(e);
689 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
691 if (IS_ERR(target)) {
692 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
693 ret = PTR_ERR(target);
694 goto cleanup_matches;
696 t->u.kernel.target = target;
698 ret = check_target(e, net, name);
/* Error unwind: release target module, then tear down matches. */
703 module_put(t->u.kernel.target->me);
705 xt_ematch_foreach(ematch, e) {
708 cleanup_match(ematch, net);
713 static bool check_underflow(const struct ip6t_entry *e)
715 const struct xt_entry_target *t;
716 unsigned int verdict;
718 if (!unconditional(&e->ipv6))
720 t = ip6t_get_target_c(e);
721 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
723 verdict = ((struct xt_standard_target *)t)->verdict;
724 verdict = -verdict - 1;
725 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * NOTE(review): fragment — return-type line, braces, the next_offset
 * bounds line, and the comefrom reset are among the missing lines; kept
 * byte-identical. Structural pass over one rule: alignment and bounds
 * checks, then records hook entry / underflow offsets supplied by
 * userspace when they land on this rule (underflows must pass
 * check_underflow()); finally zeroes the rule's counters.
 */
729 check_entry_size_and_hooks(struct ip6t_entry *e,
730 struct xt_table_info *newinfo,
731 const unsigned char *base,
732 const unsigned char *limit,
733 const unsigned int *hook_entries,
734 const unsigned int *underflows,
735 unsigned int valid_hooks)
739 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
740 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
741 duprintf("Bad offset %p\n", e);
746 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
747 duprintf("checking: element %p size %u\n",
752 /* Check hooks & underflows */
753 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
754 if (!(valid_hooks & (1 << h)))
756 if ((unsigned char *)e - base == hook_entries[h])
757 newinfo->hook_entry[h] = hook_entries[h];
758 if ((unsigned char *)e - base == underflows[h]) {
759 if (!check_underflow(e)) {
760 pr_err("Underflows must be unconditional and "
761 "use the STANDARD target with "
765 newinfo->underflow[h] = underflows[h];
769 /* Clear counters and comefrom */
770 e->counters = ((struct xt_counters) { 0, 0 });
/*
 * NOTE(review): fragment — braces and (presumably) a `par.net = net;`
 * line are missing; kept byte-identical. Full teardown of one rule:
 * destroy all matches via cleanup_match(), then run the target's
 * destructor and drop its module reference.
 */
775 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
777 struct xt_tgdtor_param par;
778 struct xt_entry_target *t;
779 struct xt_entry_match *ematch;
781 /* Cleanup all matches */
782 xt_ematch_foreach(ematch, e)
783 cleanup_match(ematch, net);
784 t = ip6t_get_target(e);
787 par.target = t->u.kernel.target;
788 par.targinfo = t->data;
789 par.family = NFPROTO_IPV6;
790 if (par.target->destroy != NULL)
791 par.target->destroy(&par);
792 module_put(par.target->me);
/*
 * NOTE(review): fragment — return-type line, braces, loop counters, and
 * several error-path lines are missing; kept byte-identical.
 *
 * Validates and finalizes a user-supplied ruleset blob: offset/size
 * checks per entry, verification that every valid hook got an entry
 * point and underflow, loop detection via mark_source_chains(), per-rule
 * extension checks via find_check_entry() (with rollback through
 * cleanup_entry() on failure), then replication of the blob to every
 * other CPU's copy.
 */
795 /* Checks and translates the user-supplied table segment (held in
798 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
799 const struct ip6t_replace *repl)
801 struct ip6t_entry *iter;
805 newinfo->size = repl->size;
806 newinfo->number = repl->num_entries;
808 /* Init all hooks to impossible value. */
809 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
810 newinfo->hook_entry[i] = 0xFFFFFFFF;
811 newinfo->underflow[i] = 0xFFFFFFFF;
814 duprintf("translate_table: size %u\n", newinfo->size);
816 /* Walk through entries, checking offsets. */
817 xt_entry_foreach(iter, entry0, newinfo->size) {
818 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each user-defined chain (ERROR target) deepens the jumpstack. */
826 if (strcmp(ip6t_get_target(iter)->u.user.name,
827 XT_ERROR_TARGET) == 0)
828 ++newinfo->stacksize;
831 if (i != repl->num_entries) {
832 duprintf("translate_table: %u not %u entries\n",
833 i, repl->num_entries);
837 /* Check hooks all assigned */
838 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
839 /* Only hooks which are valid */
840 if (!(repl->valid_hooks & (1 << i)))
842 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
843 duprintf("Invalid hook entry %u %u\n",
844 i, repl->hook_entry[i]);
847 if (newinfo->underflow[i] == 0xFFFFFFFF) {
848 duprintf("Invalid underflow %u %u\n",
849 i, repl->underflow[i]);
854 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
857 /* Finally, each sanity check must pass */
859 xt_entry_foreach(iter, entry0, newinfo->size) {
860 ret = find_check_entry(iter, net, repl->name, repl->size);
/* Failure rollback: tear down the entries already checked. */
867 xt_entry_foreach(iter, entry0, newinfo->size) {
870 cleanup_entry(iter, net);
875 /* And one copy for every other CPU */
876 for_each_possible_cpu(i) {
877 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
878 memcpy(newinfo->entries[i], entry0, newinfo->size);
/*
 * NOTE(review): fragment — return-type line, braces, local declarations
 * (cpu, i, start, bcnt, pcnt), and the `do {` opener are missing; kept
 * byte-identical. Sums per-CPU rule counters into the caller's array;
 * each read is retried under the per-CPU xt_recseq seqcount so a
 * concurrent writer can't be torn.
 */
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
888 struct ip6t_entry *iter;
892 for_each_possible_cpu(cpu) {
893 seqcount_t *s = &per_cpu(xt_recseq, cpu);
896 xt_entry_foreach(iter, t->entries[cpu], t->size) {
/* Seqcount retry loop: re-read if a writer raced with us. */
901 start = read_seqcount_begin(s);
902 bcnt = iter->counters.bcnt;
903 pcnt = iter->counters.pcnt;
904 } while (read_seqcount_retry(s, start));
906 ADD_COUNTER(counters[i], bcnt, pcnt);
/*
 * NOTE(review): fragment — braces and the final `return counters;` are
 * missing; kept byte-identical. Allocates a zeroed snapshot array and
 * fills it via get_counters(); returns ERR_PTR(-ENOMEM) on allocation
 * failure. Caller owns (and must vfree) the returned array.
 */
912 static struct xt_counters *alloc_counters(const struct xt_table *table)
914 unsigned int countersize;
915 struct xt_counters *counters;
916 const struct xt_table_info *private = table->private;
918 /* We need atomic snapshot of counters: rest doesn't change
919 (other than comefrom, which userspace doesn't care
921 countersize = sizeof(struct xt_counters) * private->number;
922 counters = vzalloc(countersize);
924 if (counters == NULL)
925 return ERR_PTR(-ENOMEM);
927 get_counters(private, counters);
/*
 * NOTE(review): fragment — return-type line, braces, error labels, and
 * a few loop-interior lines are missing; kept byte-identical.
 *
 * Copies the ruleset blob to userspace, then patches each entry
 * in-place in the user buffer: the live counters snapshot, and the
 * user-visible names of each match and the target (kernel pointers are
 * not meaningful to userspace).
 */
933 copy_entries_to_user(unsigned int total_size,
934 const struct xt_table *table,
935 void __user *userptr)
937 unsigned int off, num;
938 const struct ip6t_entry *e;
939 struct xt_counters *counters;
940 const struct xt_table_info *private = table->private;
942 const void *loc_cpu_entry;
944 counters = alloc_counters(table);
945 if (IS_ERR(counters))
946 return PTR_ERR(counters);
948 /* choose the copy that is on our node/cpu, ...
949 * This choice is lazy (because current thread is
950 * allowed to migrate to another cpu)
952 loc_cpu_entry = private->entries[raw_smp_processor_id()];
953 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
958 /* FIXME: use iterator macros --RR */
959 /* ... then go back and fix counters and names */
960 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
962 const struct xt_entry_match *m;
963 const struct xt_entry_target *t;
965 e = (struct ip6t_entry *)(loc_cpu_entry + off);
/* Overwrite the entry's counters field with the snapshot. */
966 if (copy_to_user(userptr + off
967 + offsetof(struct ip6t_entry, counters),
969 sizeof(counters[num])) != 0) {
/* Walk matches between the header and target_offset. */
974 for (i = sizeof(struct ip6t_entry);
975 i < e->target_offset;
976 i += m->u.match_size) {
979 if (copy_to_user(userptr + off + i
980 + offsetof(struct xt_entry_match,
982 m->u.kernel.match->name,
983 strlen(m->u.kernel.match->name)+1)
990 t = ip6t_get_target_c(e);
991 if (copy_to_user(userptr + off + e->target_offset
992 + offsetof(struct xt_entry_target,
994 t->u.kernel.target->name,
995 strlen(t->u.kernel.target->name)+1) != 0) {
1006 #ifdef CONFIG_COMPAT
/*
 * NOTE(review): fragment — braces and (presumably) an `if (v > 0)`
 * guard before the jump adjustment are missing; kept byte-identical.
 * Converts a compat (32-bit) STANDARD verdict to native: positive
 * verdicts are jump offsets and must be adjusted by the accumulated
 * compat offset delta.
 */
1007 static void compat_standard_from_user(void *dst, const void *src)
1009 int v = *(compat_int_t *)src;
1012 v += xt_compat_calc_jump(AF_INET6, v);
1013 memcpy(dst, &v, sizeof(v));
/*
 * NOTE(review): fragment — braces and (presumably) an `if (cv > 0)`
 * guard are missing; kept byte-identical. Inverse of
 * compat_standard_from_user(): subtracts the compat offset delta from a
 * jump verdict before copying it to the 32-bit userspace buffer.
 */
1016 static int compat_standard_to_user(void __user *dst, const void *src)
1018 compat_int_t cv = *(int *)src;
1021 cv -= xt_compat_calc_jump(AF_INET6, cv);
1022 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/*
 * NOTE(review): fragment — braces, `int off, ret;` style declarations,
 * and the return lines are missing; kept byte-identical. Computes how
 * much smaller one entry is in the compat (32-bit) layout, records the
 * (offset, delta) pair for later jump fixups, and shrinks the reported
 * table size plus any hook/underflow offsets that lie past this entry.
 */
1025 static int compat_calc_entry(const struct ip6t_entry *e,
1026 const struct xt_table_info *info,
1027 const void *base, struct xt_table_info *newinfo)
1029 const struct xt_entry_match *ematch;
1030 const struct xt_entry_target *t;
1031 unsigned int entry_offset;
1034 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1035 entry_offset = (void *)e - base;
1036 xt_ematch_foreach(ematch, e)
1037 off += xt_compat_match_offset(ematch->u.kernel.match);
1038 t = ip6t_get_target_c(e);
1039 off += xt_compat_target_offset(t->u.kernel.target);
1040 newinfo->size -= off;
1041 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Hook/underflow offsets after this entry shift down by `off`. */
1045 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1046 if (info->hook_entry[i] &&
1047 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1048 newinfo->hook_entry[i] -= off;
1049 if (info->underflow[i] &&
1050 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1051 newinfo->underflow[i] -= off;
/*
 * NOTE(review): fragment — braces, `int ret;`, the -EINVAL return, and
 * the loop's error check are missing; kept byte-identical. Builds a
 * compat-sized view of the table metadata by running
 * compat_calc_entry() over every rule of the local CPU's copy.
 */
1056 static int compat_table_info(const struct xt_table_info *info,
1057 struct xt_table_info *newinfo)
1059 struct ip6t_entry *iter;
1060 void *loc_cpu_entry;
1063 if (!newinfo || !info)
1066 /* we dont care about newinfo->entries[] */
1067 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1068 newinfo->initial_entries = 0;
1069 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1070 xt_compat_init_offsets(AF_INET6, info->number);
1071 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1072 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * NOTE(review): fragment — braces, several error returns, capability
 * check, and parts of the compat branches are missing; kept
 * byte-identical. Implements the IP6T_SO_GET_INFO getsockopt: looks up
 * the named table (auto-loading its module if needed) and copies hook
 * entry points, underflows, entry count, and size to userspace; under
 * CONFIG_COMPAT the size is recomputed for the 32-bit layout.
 */
1080 static int get_info(struct net *net, void __user *user,
1081 const int *len, int compat)
1083 char name[XT_TABLE_MAXNAMELEN];
1087 if (*len != sizeof(struct ip6t_getinfo)) {
1088 duprintf("length %u != %zu\n", *len,
1089 sizeof(struct ip6t_getinfo));
1093 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Defensive NUL-termination of the user-supplied table name. */
1096 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1097 #ifdef CONFIG_COMPAT
1099 xt_compat_lock(AF_INET6);
1101 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1102 "ip6table_%s", name);
1103 if (!IS_ERR_OR_NULL(t)) {
1104 struct ip6t_getinfo info;
1105 const struct xt_table_info *private = t->private;
1106 #ifdef CONFIG_COMPAT
1107 struct xt_table_info tmp;
1110 ret = compat_table_info(private, &tmp);
1111 xt_compat_flush_offsets(AF_INET6);
1115 memset(&info, 0, sizeof(info));
1116 info.valid_hooks = t->valid_hooks;
1117 memcpy(info.hook_entry, private->hook_entry,
1118 sizeof(info.hook_entry));
1119 memcpy(info.underflow, private->underflow,
1120 sizeof(info.underflow));
1121 info.num_entries = private->number;
1122 info.size = private->size;
1123 strcpy(info.name, name);
1125 if (copy_to_user(user, &info, *len) != 0)
1133 ret = t ? PTR_ERR(t) : -ENOENT;
1134 #ifdef CONFIG_COMPAT
1136 xt_compat_unlock(AF_INET6);
/*
 * NOTE(review): fragment — return-type line, braces, error returns, and
 * the table unlock/module_put tail are missing; kept byte-identical.
 * Implements IP6T_SO_GET_ENTRIES: validates the two-stage length
 * handshake (header, then header + table size), then dumps the ruleset
 * via copy_entries_to_user().
 */
1142 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1146 struct ip6t_get_entries get;
1149 if (*len < sizeof(get)) {
1150 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1153 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1155 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1156 duprintf("get_entries: %u != %zu\n",
1157 *len, sizeof(get) + get.size);
1161 t = xt_find_table_lock(net, AF_INET6, get.name);
1162 if (!IS_ERR_OR_NULL(t)) {
1163 struct xt_table_info *private = t->private;
1164 duprintf("t->private->number = %u\n", private->number);
/* Size must match exactly — the table may have changed since GET_INFO. */
1165 if (get.size == private->size)
1166 ret = copy_entries_to_user(private->size,
1167 t, uptr->entrytable);
1169 duprintf("get_entries: I've got %u not %u!\n",
1170 private->size, get.size);
1176 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * NOTE(review): fragment — return-type line, braces, module refcount
 * get/put lines, and the error-label bodies are missing; kept
 * byte-identical. Common tail of table replacement: swaps the new
 * xt_table_info in via xt_replace_table(), snapshots the old table's
 * counters for userspace, tears down the old rules, and frees the old
 * info. The counters copy_to_user failure is deliberately non-fatal —
 * the new table is already live.
 */
1182 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1183 struct xt_table_info *newinfo, unsigned int num_counters,
1184 void __user *counters_ptr)
1188 struct xt_table_info *oldinfo;
1189 struct xt_counters *counters;
1190 const void *loc_cpu_old_entry;
1191 struct ip6t_entry *iter;
1194 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1200 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1201 "ip6table_%s", name);
1202 if (IS_ERR_OR_NULL(t)) {
1203 ret = t ? PTR_ERR(t) : -ENOENT;
1204 goto free_newinfo_counters_untrans;
1208 if (valid_hooks != t->valid_hooks) {
1209 duprintf("Valid hook crap: %08X vs %08X\n",
1210 valid_hooks, t->valid_hooks);
1215 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1219 /* Update module usage count based on number of rules */
1220 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1221 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1222 if ((oldinfo->number > oldinfo->initial_entries) ||
1223 (newinfo->number <= oldinfo->initial_entries))
1225 if ((oldinfo->number > oldinfo->initial_entries) &&
1226 (newinfo->number <= oldinfo->initial_entries))
1229 /* Get the old counters, and synchronize with replace */
1230 get_counters(oldinfo, counters);
1232 /* Decrease module usage counts and free resource */
1233 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1234 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1235 cleanup_entry(iter, net);
1237 xt_free_table_info(oldinfo);
1238 if (copy_to_user(counters_ptr, counters,
1239 sizeof(struct xt_counters) * num_counters) != 0) {
1240 /* Silent error, can't fail, new table is already in place */
1241 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n")
1250 free_newinfo_counters_untrans:
/*
 * NOTE(review): fragment — return-type line, braces, `int ret;`, the
 * ENOMEM/EFAULT returns, and the success path between translate and
 * __do_replace are missing; kept byte-identical. IP6T_SO_SET_REPLACE
 * entry: copies the header and ruleset blob from userspace, validates
 * it via translate_table(), and commits via __do_replace(); on failure
 * unwinds the translated entries.
 */
1257 do_replace(struct net *net, const void __user *user, unsigned int len)
1260 struct ip6t_replace tmp;
1261 struct xt_table_info *newinfo;
1262 void *loc_cpu_entry;
1263 struct ip6t_entry *iter;
1265 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1268 /* overflow check */
1269 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1271 tmp.name[sizeof(tmp.name)-1] = 0;
1273 newinfo = xt_alloc_table_info(tmp.size);
1277 /* choose the copy that is on our node/cpu */
1278 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1279 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1285 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1289 duprintf("ip_tables: Translated table\n");
1291 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1292 tmp.num_counters, tmp.counters);
1294 goto free_newinfo_untrans;
1297 free_newinfo_untrans:
1298 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1299 cleanup_entry(iter, net);
1301 xt_free_table_info(newinfo);
/*
 * NOTE(review): fragment — return-type line, the `compat` parameter,
 * braces, several error returns, the `i` loop counter handling, and the
 * unlock tail are missing; kept byte-identical.
 * IP6T_SO_SET_ADD_COUNTERS: reads a (possibly compat 32-bit) counters
 * header plus array from userspace and adds each entry's bcnt/pcnt to
 * the matching rule under the xt_write_recseq write-side protocol.
 */
1306 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1309 unsigned int i, curcpu;
1310 struct xt_counters_info tmp;
1311 struct xt_counters *paddc;
1312 unsigned int num_counters;
1317 const struct xt_table_info *private;
1319 const void *loc_cpu_entry;
1320 struct ip6t_entry *iter;
1321 unsigned int addend;
1322 #ifdef CONFIG_COMPAT
1323 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1327 size = sizeof(struct compat_xt_counters_info);
1332 size = sizeof(struct xt_counters_info);
1335 if (copy_from_user(ptmp, user, size) != 0)
1338 #ifdef CONFIG_COMPAT
1340 num_counters = compat_tmp.num_counters;
1341 name = compat_tmp.name;
1345 num_counters = tmp.num_counters;
/* Total length must exactly cover header + declared counters. */
1349 if (len != size + num_counters * sizeof(struct xt_counters))
1352 paddc = vmalloc(len - size);
1356 if (copy_from_user(paddc, user + size, len - size) != 0) {
1361 t = xt_find_table_lock(net, AF_INET6, name);
1362 if (IS_ERR_OR_NULL(t)) {
1363 ret = t ? PTR_ERR(t) : -ENOENT;
1369 private = t->private;
1370 if (private->number != num_counters) {
1372 goto unlock_up_free;
1376 /* Choose the copy that is on our node */
1377 curcpu = smp_processor_id();
1378 addend = xt_write_recseq_begin();
1379 loc_cpu_entry = private->entries[curcpu];
1380 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1381 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1384 xt_write_recseq_end(addend);
1396 #ifdef CONFIG_COMPAT
/*
 * NOTE(review): fragment — several fields (presumably valid_hooks,
 * num_entries, size, num_counters per the native ip6t_replace layout —
 * confirm) and the closing `};` are missing; kept byte-identical.
 * 32-bit userspace layout of the SET_REPLACE request: pointers become
 * compat_uptr_t and the trailing entries use the compat entry format.
 */
1397 struct compat_ip6t_replace {
1398 char name[XT_TABLE_MAXNAMELEN];
1402 u32 hook_entry[NF_INET_NUMHOOKS];
1403 u32 underflow[NF_INET_NUMHOOKS];
1405 compat_uptr_t counters; /* struct xt_counters * */
1406 struct compat_ip6t_entry entries[0];
/*
 * NOTE(review): fragment — return-type line, the index parameter `*i`,
 * braces, origsize initialization, and the error returns are missing;
 * kept byte-identical. Serializes one native entry into the compat
 * (32-bit) stream: copies the header plus counters snapshot, converts
 * each match and the target via the xt_compat helpers, then patches
 * target_offset/next_offset for the shrunken layout.
 */
1410 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1411 unsigned int *size, struct xt_counters *counters,
1414 struct xt_entry_target *t;
1415 struct compat_ip6t_entry __user *ce;
1416 u_int16_t target_offset, next_offset;
1417 compat_uint_t origsize;
1418 const struct xt_entry_match *ematch;
1422 ce = (struct compat_ip6t_entry __user *)*dstptr;
1423 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1424 copy_to_user(&ce->counters, &counters[i],
1425 sizeof(counters[i])) != 0)
1428 *dstptr += sizeof(struct compat_ip6t_entry);
1429 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1431 xt_ematch_foreach(ematch, e) {
1432 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the compat conversion saved so far. */
1436 target_offset = e->target_offset - (origsize - *size);
1437 t = ip6t_get_target(e);
1438 ret = xt_compat_target_to_user(t, dstptr, size);
1441 next_offset = e->next_offset - (origsize - *size);
1442 if (put_user(target_offset, &ce->target_offset) != 0 ||
1443 put_user(next_offset, &ce->next_offset) != 0)
/*
 * NOTE(review): fragment — return-type line, the `name` and `size`
 * parameter lines, braces, and the final return are missing; kept
 * byte-identical. Compat-path counterpart of find_check_match(): looks
 * up (and module-gets) the match by name/revision and accumulates the
 * native-vs-compat size delta into *size.
 */
1449 compat_find_calc_match(struct xt_entry_match *m,
1451 const struct ip6t_ip6 *ipv6,
1452 unsigned int hookmask,
1455 struct xt_match *match;
1457 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1458 m->u.user.revision);
1459 if (IS_ERR(match)) {
1460 duprintf("compat_check_calc_match: `%s' not found\n",
1462 return PTR_ERR(match);
1464 m->u.kernel.match = match;
1465 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken for every match and the target of a
 * compat entry.  Used on error paths for entries that were resolved but
 * whose ->check hooks were never run (those need cleanup_entry instead).
 */
1469 static void compat_release_entry(struct compat_ip6t_entry *e)
1471 struct xt_entry_target *t;
1472 struct xt_entry_match *ematch;
1474 /* Cleanup all matches */
1475 xt_ematch_foreach(ematch, e)
1476 module_put(ematch->u.kernel.match->me);
1477 t = compat_ip6t_get_target(e);
1478 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry: alignment, bounds against [base, limit),
 * minimum size, then resolve all matches and the target, recording the
 * kernel/compat size delta via xt_compat_add_offset() so offsets can be
 * translated later.  Also latches hook entry/underflow positions that
 * coincide with this entry.  On failure, releases any module refs taken
 * so far (release_matches path).
 * NOTE(review): interior lines (locals, some error gotos, closing brace)
 * are elided in this excerpt.
 */
1482 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1483 struct xt_table_info *newinfo,
1485 const unsigned char *base,
1486 const unsigned char *limit,
1487 const unsigned int *hook_entries,
1488 const unsigned int *underflows,
1491 struct xt_entry_match *ematch;
1492 struct xt_entry_target *t;
1493 struct xt_target *target;
1494 unsigned int entry_offset;
1498 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Misaligned entry or one that cannot even hold its header is hostile input. */
1499 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1500 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1501 duprintf("Bad offset %p, limit = %p\n", e, limit);
1505 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1506 sizeof(struct compat_xt_entry_target)) {
1507 duprintf("checking: element %p size %u\n",
1512 /* For purposes of check_entry casting the compat entry is fine */
1513 ret = check_entry((struct ip6t_entry *)e, name);
/* off starts at the fixed header growth, then each match/target adds its own delta. */
1517 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1518 entry_offset = (void *)e - (void *)base;
1520 xt_ematch_foreach(ematch, e) {
1521 ret = compat_find_calc_match(ematch, name,
1522 &e->ipv6, e->comefrom, &off);
1524 goto release_matches;
1528 t = compat_ip6t_get_target(e);
1529 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1530 t->u.user.revision);
1531 if (IS_ERR(target)) {
1532 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1534 ret = PTR_ERR(target);
/* Matches already hold module refs; drop them before bailing out. */
1535 goto release_matches;
1537 t->u.kernel.target = target;
1539 off += xt_compat_target_offset(target);
/* Remember this entry's compat->kernel offset delta for later translation. */
1541 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1545 /* Check hooks & underflows */
1546 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1547 if ((unsigned char *)e - base == hook_entries[h])
1548 newinfo->hook_entry[h] = hook_entries[h];
1549 if ((unsigned char *)e - base == underflows[h])
1550 newinfo->underflow[h] = underflows[h];
1553 /* Clear counters and comefrom */
1554 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then every match ref. */
1559 module_put(t->u.kernel.target->me);
1561 xt_ematch_foreach(ematch, e) {
1564 module_put(ematch->u.kernel.match->me);
/*
 * Expand one compat entry into native kernel layout at *dstptr, growing
 * the size accounting and rebasing target/next offsets and any hook
 * entry/underflow positions that lie beyond this entry.
 * NOTE(review): locals (ret, h) and the return are elided here.
 */
1570 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1571 unsigned int *size, const char *name,
1572 struct xt_table_info *newinfo, unsigned char *base)
1574 struct xt_entry_target *t;
1575 struct ip6t_entry *de;
1576 unsigned int origsize;
1578 struct xt_entry_match *ematch;
1582 de = (struct ip6t_entry *)*dstptr;
/* Copy the native-sized header, then the counters member explicitly. */
1583 memcpy(de, e, sizeof(struct ip6t_entry));
1584 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1586 *dstptr += sizeof(struct ip6t_entry);
1587 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1589 xt_ematch_foreach(ematch, e) {
1590 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Offsets grow by the total expansion so far (*size - origsize, expressed negated). */
1594 de->target_offset = e->target_offset - (origsize - *size);
1595 t = compat_ip6t_get_target(e);
1596 xt_compat_target_from_user(t, dstptr, size);
1598 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook/underflow marks that sit past this entry by the same expansion. */
1599 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1600 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1601 newinfo->hook_entry[h] -= origsize - *size;
1602 if ((unsigned char *)de - base < newinfo->underflow[h])
1603 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the ->checkentry hooks for a translated (now native-layout) entry:
 * each match via check_match(), then the target via check_target().
 * On a match failure, unwinds only the matches already checked.
 * NOTE(review): some parameters and the success return are elided.
 */
1608 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1613 struct xt_mtchk_param mtpar;
1614 struct xt_entry_match *ematch;
/* Match-check context: the IPv6 rule header and the hooks this rule is reachable from. */
1619 mtpar.entryinfo = &e->ipv6;
1620 mtpar.hook_mask = e->comefrom;
1621 mtpar.family = NFPROTO_IPV6;
1622 xt_ematch_foreach(ematch, e) {
1623 ret = check_match(ematch, &mtpar);
1625 goto cleanup_matches;
1629 ret = check_target(e, net, name);
1631 goto cleanup_matches;
/* Unwind: destroy only the matches whose check already succeeded. */
1635 xt_ematch_foreach(ematch, e) {
1638 cleanup_match(ematch, net);
/*
 * Convert an entire compat-format table from userland into native
 * kernel format.  Two passes under the per-family compat lock:
 *   1) check_compat_entry_size_and_hooks() over the compat image
 *      (validates, resolves extensions, records size offsets),
 *   2) compat_copy_entry_from_user() into a freshly sized
 *      xt_table_info, then (lock dropped) mark_source_chains() and
 *      compat_check_entry() on the native image.
 * On success *pinfo/*pentry0 are replaced with the new table and the
 * old info is freed.  Error paths must distinguish entries that passed
 * ->check (cleanup_entry) from those that were only resolved
 * (compat_release_entry) -- see the j/i bookkeeping comment below.
 * NOTE(review): many interior lines (allocation of info, error labels,
 * returns) are elided in this excerpt; statement order and lock scope
 * are critical here, do not reflow.
 */
1644 translate_compat_table(struct net *net,
1646 unsigned int valid_hooks,
1647 struct xt_table_info **pinfo,
1649 unsigned int total_size,
1650 unsigned int number,
1651 unsigned int *hook_entries,
1652 unsigned int *underflows)
1655 struct xt_table_info *newinfo, *info;
1656 void *pos, *entry0, *entry1;
1657 struct compat_ip6t_entry *iter0;
1658 struct ip6t_entry *iter1;
1665 info->number = number;
1667 /* Init all hooks to impossible value. */
1668 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1669 info->hook_entry[i] = 0xFFFFFFFF;
1670 info->underflow[i] = 0xFFFFFFFF;
1673 duprintf("translate_compat_table: size %u\n", info->size);
/* Pass 1 runs with the compat offset table locked for AF_INET6. */
1675 xt_compat_lock(AF_INET6);
1676 xt_compat_init_offsets(AF_INET6, number);
1677 /* Walk through entries, checking offsets. */
1678 xt_entry_foreach(iter0, entry0, total_size) {
1679 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1681 entry0 + total_size,
1692 duprintf("translate_compat_table: %u not %u entries\n",
1697 /* Check hooks all assigned */
1698 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1699 /* Only hooks which are valid */
1700 if (!(valid_hooks & (1 << i)))
1702 if (info->hook_entry[i] == 0xFFFFFFFF) {
1703 duprintf("Invalid hook entry %u %u\n",
1704 i, hook_entries[i]);
1707 if (info->underflow[i] == 0xFFFFFFFF) {
1708 duprintf("Invalid underflow %u %u\n",
/* Pass 2: allocate the native-sized table and expand into it. */
1715 newinfo = xt_alloc_table_info(size);
1719 newinfo->number = number;
1720 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1721 newinfo->hook_entry[i] = info->hook_entry[i];
1722 newinfo->underflow[i] = info->underflow[i];
1724 entry1 = newinfo->entries[raw_smp_processor_id()];
1727 xt_entry_foreach(iter0, entry0, total_size) {
1728 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1729 name, newinfo, entry1);
/* Offsets are no longer needed once the copy is done; drop the lock. */
1733 xt_compat_flush_offsets(AF_INET6);
1734 xt_compat_unlock(AF_INET6);
1739 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1743 xt_entry_foreach(iter1, entry1, newinfo->size) {
1744 ret = compat_check_entry(iter1, net, name);
1748 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1749 XT_ERROR_TARGET) == 0)
1750 ++newinfo->stacksize;
1754 * The first i matches need cleanup_entry (calls ->destroy)
1755 * because they had called ->check already. The other j-i
1756 * entries need only release.
1760 xt_entry_foreach(iter0, entry0, newinfo->size) {
1765 compat_release_entry(iter0);
1767 xt_entry_foreach(iter1, entry1, newinfo->size) {
1770 cleanup_entry(iter1, net);
1772 xt_free_table_info(newinfo);
1776 /* And one copy for every other CPU */
1777 for_each_possible_cpu(i)
1778 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1779 memcpy(newinfo->entries[i], entry1, newinfo->size);
/* Success: the old (compat-sized) info is no longer needed. */
1783 xt_free_table_info(info);
/* Error unwind for pass-1/2 failures: free, release refs, drop offsets+lock. */
1787 xt_free_table_info(newinfo);
1789 xt_entry_foreach(iter0, entry0, total_size) {
1792 compat_release_entry(iter0);
1796 xt_compat_flush_offsets(AF_INET6);
1797 xt_compat_unlock(AF_INET6);
/*
 * Compat setsockopt(IP6T_SO_SET_REPLACE) handler: copy the compat
 * replace header and entry blob from userland, translate the table to
 * native format, then install it via __do_replace().  If installation
 * fails after translation, the translated entries must be cleaned up
 * (they passed ->check).
 * NOTE(review): some error returns/labels are elided in this excerpt.
 */
1802 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1805 struct compat_ip6t_replace tmp;
1806 struct xt_table_info *newinfo;
1807 void *loc_cpu_entry;
1808 struct ip6t_entry *iter;
1810 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1813 /* overflow check */
/* Reject sizes that would overflow the per-CPU/table-counter allocations. */
1814 if (tmp.size >= INT_MAX / num_possible_cpus())
1816 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Defensive NUL-termination of the untrusted table name. */
1818 tmp.name[sizeof(tmp.name)-1] = 0;
1820 newinfo = xt_alloc_table_info(tmp.size);
1824 /* choose the copy that is on our node/cpu */
1825 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1826 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1832 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1833 &newinfo, &loc_cpu_entry, tmp.size,
1834 tmp.num_entries, tmp.hook_entry,
1839 duprintf("compat_do_replace: Translated table\n");
1841 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1842 tmp.num_counters, compat_ptr(tmp.counters));
1844 goto free_newinfo_untrans;
1847 free_newinfo_untrans:
/* Entries already passed ->check, so full cleanup_entry is required. */
1848 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1849 cleanup_entry(iter, net);
1851 xt_free_table_info(newinfo);
/*
 * Compat setsockopt entry point: requires CAP_NET_ADMIN in the socket's
 * user namespace, then dispatches REPLACE / ADD_COUNTERS to their
 * compat-aware handlers (note the compat=1 flag to do_add_counters).
 */
1856 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1861 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1865 case IP6T_SO_SET_REPLACE:
1866 ret = compat_do_replace(sock_net(sk), user, len);
1869 case IP6T_SO_SET_ADD_COUNTERS:
1870 ret = do_add_counters(sock_net(sk), user, len, 1);
1874 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Compat layout for the IP6T_SO_GET_ENTRIES request/reply: table name
 * in, entry table out.  NOTE(review): the size member visible in the
 * native struct is elided in this excerpt.
 */
1881 struct compat_ip6t_get_entries {
1882 char name[XT_TABLE_MAXNAMELEN];
1884 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a whole table to a compat userland buffer: snapshot counters,
 * then convert entries one by one via compat_copy_entry_to_user().
 * NOTE(review): locals (pos, size, i, ret) and the counter vfree are
 * elided in this excerpt.
 */
1888 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1889 void __user *userptr)
1891 struct xt_counters *counters;
1892 const struct xt_table_info *private = table->private;
1896 const void *loc_cpu_entry;
1898 struct ip6t_entry *iter;
1900 counters = alloc_counters(table);
1901 if (IS_ERR(counters))
1902 return PTR_ERR(counters);
1904 /* choose the copy that is on our node/cpu, ...
1905 * This choice is lazy (because current thread is
1906 * allowed to migrate to another cpu)
1908 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1911 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1912 ret = compat_copy_entry_to_user(iter, &pos,
1913 &size, counters, i++);
/*
 * Compat getsockopt(IP6T_SO_GET_ENTRIES) handler: validate the request
 * length against the caller-declared size, look the table up under the
 * compat lock, and dump it if the compat-converted size matches what
 * userland expects.
 * NOTE(review): some error returns and the module_put/unlock on the
 * found-table path are elided in this excerpt.
 */
1923 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1927 struct compat_ip6t_get_entries get;
1930 if (*len < sizeof(get)) {
1931 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1935 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Total length must be exactly header + declared entry size. */
1938 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1939 duprintf("compat_get_entries: %u != %zu\n",
1940 *len, sizeof(get) + get.size);
1944 xt_compat_lock(AF_INET6);
1945 t = xt_find_table_lock(net, AF_INET6, get.name);
1946 if (!IS_ERR_OR_NULL(t)) {
1947 const struct xt_table_info *private = t->private;
1948 struct xt_table_info info;
1949 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info() computes the compat-converted size to compare against. */
1950 ret = compat_table_info(private, &info);
1951 if (!ret && get.size == info.size) {
1952 ret = compat_copy_entries_to_user(private->size,
1953 t, uptr->entrytable);
1955 duprintf("compat_get_entries: I've got %u not %u!\n",
1956 private->size, get.size);
1959 xt_compat_flush_offsets(AF_INET6);
/* Lookup failed: propagate PTR_ERR or -ENOENT for a NULL table. */
1963 ret = t ? PTR_ERR(t) : -ENOENT;
1965 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat get handler falls through to the native one. */
1969 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt entry point: CAP_NET_ADMIN required; INFO and
 * ENTRIES get compat handling, everything else is delegated to the
 * native do_ip6t_get_ctl().
 */
1972 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1976 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1980 case IP6T_SO_GET_INFO:
1981 ret = get_info(sock_net(sk), user, len, 1);
1983 case IP6T_SO_GET_ENTRIES:
1984 ret = compat_get_entries(sock_net(sk), user, len);
1987 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt entry point: CAP_NET_ADMIN required; REPLACE and
 * ADD_COUNTERS (compat=0) are the only supported commands.
 */
1994 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1998 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2002 case IP6T_SO_SET_REPLACE:
2003 ret = do_replace(sock_net(sk), user, len);
2006 case IP6T_SO_SET_ADD_COUNTERS:
2007 ret = do_add_counters(sock_net(sk), user, len, 0);
2011 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt entry point: CAP_NET_ADMIN required.  Handles
 * INFO, ENTRIES, and the match/target revision queries (which may
 * trigger a module autoload via try_then_request_module).
 * NOTE(review): several error returns and the revision-query plumbing
 * are elided in this excerpt.
 */
2019 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2023 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2027 case IP6T_SO_GET_INFO:
2028 ret = get_info(sock_net(sk), user, len, 0);
2031 case IP6T_SO_GET_ENTRIES:
2032 ret = get_entries(sock_net(sk), user, len);
2035 case IP6T_SO_GET_REVISION_MATCH:
2036 case IP6T_SO_GET_REVISION_TARGET: {
2037 struct xt_get_revision rev;
2040 if (*len != sizeof(rev)) {
2044 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Defensive NUL-termination of the untrusted extension name. */
2048 rev.name[sizeof(rev.name)-1] = 0;
2050 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Autoload "ip6t_<name>" if the revision lookup misses the first time. */
2055 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2058 "ip6t_%s", rev.name);
2063 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table: allocate per-CPU entry storage, copy
 * in the initial ruleset, translate/validate it, and hand it to the
 * x_tables core.  Returns the registered table or ERR_PTR on failure
 * (newinfo is freed on every error path).
 * NOTE(review): a couple of error gotos and the success return are
 * elided in this excerpt.
 */
2070 struct xt_table *ip6t_register_table(struct net *net,
2071 const struct xt_table *table,
2072 const struct ip6t_replace *repl)
2075 struct xt_table_info *newinfo;
2076 struct xt_table_info bootstrap = {0};
2077 void *loc_cpu_entry;
2078 struct xt_table *new_table;
2080 newinfo = xt_alloc_table_info(repl->size);
2086 /* choose the copy on our node/cpu, but dont care about preemption */
2087 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2088 memcpy(loc_cpu_entry, repl->entries, repl->size);
2090 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2094 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2095 if (IS_ERR(new_table)) {
2096 ret = PTR_ERR(new_table);
2102 xt_free_table_info(newinfo);
2104 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister from the x_tables core,
 * run cleanup_entry() on every rule (drops extension module refs and
 * calls ->destroy), release the table-owner module ref if user rules
 * were added beyond the initial set, and free the table info.
 */
2107 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2109 struct xt_table_info *private;
2110 void *loc_cpu_entry;
2111 struct module *table_owner = table->me;
2112 struct ip6t_entry *iter;
2114 private = xt_unregister_table(table);
2116 /* Decrease module usage counts and free resources */
2117 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2118 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2119 cleanup_entry(iter, net);
/* More entries than the built-in initial set => a user held the module; drop that ref. */
2120 if (private->number > private->initial_entries)
2121 module_put(table_owner);
2122 xt_free_table_info(private);
2125 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2127 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2128 u_int8_t type, u_int8_t code,
/* Result is XORed with the invert flag by the (elided) trailing expression. */
2131 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match handler for the built-in "icmp6" match: pull the ICMPv6
 * header from the skb and compare type/code against the rule's range.
 * Fragments are never matched; a packet too short to carry the header
 * is hot-dropped as malformed.
 */
2136 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2138 const struct icmp6hdr *ic;
2139 struct icmp6hdr _icmph;
2140 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2142 /* Must not be a fragment. */
2143 if (par->fragoff != 0)
2146 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2148 /* We've been asked to examine this packet, and we
2149 * can't. Hence, no choice but to drop.
2151 duprintf("Dropping evil ICMP tinygram.\n");
2152 par->hotdrop = true;
2156 return icmp6_type_code_match(icmpinfo->type,
2159 ic->icmp6_type, ic->icmp6_code,
2160 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2163 /* Called when user tries to insert an entry of this type. */
2164 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2166 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2168 /* Must specify no unknown invflags */
2169 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2172 /* The built-in targets: standard (NULL) and error. */
2173 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
/* Standard target: verdict is a plain int; compat conversion handled by x_tables helpers. */
2175 .name = XT_STANDARD_TARGET,
2176 .targetsize = sizeof(int),
2177 .family = NFPROTO_IPV6,
2178 #ifdef CONFIG_COMPAT
2179 .compatsize = sizeof(compat_int_t),
2180 .compat_from_user = compat_standard_from_user,
2181 .compat_to_user = compat_standard_to_user,
/* Error target: carries an error name string; hit only on table corruption. */
2185 .name = XT_ERROR_TARGET,
2186 .target = ip6t_error,
2187 .targetsize = XT_FUNCTION_MAXNAMELEN,
2188 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the native handlers, with compat shims when CONFIG_COMPAT.
 */
2192 static struct nf_sockopt_ops ip6t_sockopts = {
2194 .set_optmin = IP6T_BASE_CTL,
2195 .set_optmax = IP6T_SO_SET_MAX+1,
2196 .set = do_ip6t_set_ctl,
2197 #ifdef CONFIG_COMPAT
2198 .compat_set = compat_do_ip6t_set_ctl,
2200 .get_optmin = IP6T_BASE_CTL,
2201 .get_optmax = IP6T_SO_GET_MAX+1,
2202 .get = do_ip6t_get_ctl,
2203 #ifdef CONFIG_COMPAT
2204 .compat_get = compat_do_ip6t_get_ctl,
2206 .owner = THIS_MODULE,
/* The one built-in match: ICMPv6 type/code matching (see icmp6_match above). */
2209 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2212 .match = icmp6_match,
2213 .matchsize = sizeof(struct ip6t_icmp),
2214 .checkentry = icmp6_checkentry,
2215 .proto = IPPROTO_ICMPV6,
2216 .family = NFPROTO_IPV6,
/* Per-netns init: set up the x_tables proc entries for IPv6. */
2220 static int __net_init ip6_tables_net_init(struct net *net)
2222 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: remove the x_tables proc entries for IPv6. */
2225 static void __net_exit ip6_tables_net_exit(struct net *net)
2227 xt_proto_fini(net, NFPROTO_IPV6);
/* Network-namespace lifecycle hooks for this module. */
2230 static struct pernet_operations ip6_tables_net_ops = {
2231 .init = ip6_tables_net_init,
2232 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, then the built-in targets and
 * matches, then the sockopt interface.  Each error label unwinds the
 * registrations made before it, in reverse order.
 * NOTE(review): the error labels themselves and the success return are
 * elided in this excerpt.
 */
2235 static int __init ip6_tables_init(void)
2239 ret = register_pernet_subsys(&ip6_tables_net_ops);
2243 /* No one else will be downing sem now, so we won't sleep */
2244 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2247 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2251 /* Register setsockopt */
2252 ret = nf_register_sockopt(&ip6t_sockopts);
2256 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, reverse order of registration. */
2260 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2262 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2264 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of ip6_tables_init(). */
2269 static void __exit ip6_tables_fini(void)
2271 nf_unregister_sockopt(&ip6t_sockopts);
2273 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2274 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2275 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Public API for table-providing modules (ip6table_filter, ip6table_mangle, ...). */
2278 EXPORT_SYMBOL(ip6t_register_table);
2279 EXPORT_SYMBOL(ip6t_unregister_table);
2280 EXPORT_SYMBOL(ip6t_do_table);
2282 module_init(ip6_tables_init);
2283 module_exit(ip6_tables_fini);