/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
49 #define dprintf(format, args...)
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
55 #define duprintf(format, args...)
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 return xt_alloc_initial_table(ip6t, IP6T);
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
/* Match one packet against the ip6t_ip6 part of a rule: source/dest
 * address (under mask), in/out interface names, and upper-layer
 * protocol.  Performance critical — called for every packet.
 * NOTE(review): this view is extraction-damaged — the return-type
 * line, the indev/outdev parameter lines, several closing braces and
 * the return statements are missing.  Comments describe only the
 * visible lines; confirm against the full source. */
ip6_packet_match(const struct sk_buff *skb,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the corresponding invert flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Address match: compare saddr/daddr under the rule's masks,
	 * honouring the SRCIP/DSTIP invert flags. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
		/* dead IPv4-era debug code, commented out in the original:
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/

	/* Incoming interface name match (masked, word-aligned compare). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");

	/* Outgoing interface name match. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		unsigned short _frag_off;

		/* Walk the extension-header chain; also yields the
		 * fragment offset for acpar.fragoff. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);

		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
168 /* should be ip6 safe */
/* Sanity-check the ip6t_ip6 part of a user-supplied rule: reject any
 * flag or invert-flag bits outside the defined masks.
 * NOTE(review): damaged view — the "static bool" return-type line,
 * braces and the return-false/true statements are not visible. */
ip6_checkentry(const struct ip6t_ip6 *ipv6)
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
/* Target handler for the built-in ERROR target: log (ratelimited) the
 * embedded error string.  NOTE(review): damaged view — the return-type
 * line and the verdict return are not visible here. */
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Translate a byte offset (as stored in hook_entry[]/underflow[]) into
 * an entry pointer inside the table blob at @base.
 * (Restores the brace lines dropped by extraction; no logic change.) */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
203 static const struct ip6t_ip6 uncond;
205 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(): cast away const only
 * at the call, return a const pointer to the entry's target.
 * (Restores the brace lines dropped by extraction; no logic change.) */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
214 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
215 /* This cries for unification! */
/* This cries for unification! */
/* Hook number -> chain name, used when formatting TRACE log lines.
 * NOTE(review): damaged view — the closing "};" of each table and the
 * .u.log sub-initializer of trace_loginfo are not visible. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING] = "PREROUTING",
	[NF_INET_LOCAL_IN] = "INPUT",
	[NF_INET_FORWARD] = "FORWARD",
	[NF_INET_LOCAL_OUT] = "OUTPUT",
	[NF_INET_POST_ROUTING] = "POSTROUTING",

/* Kinds of comment that can follow the rule number in a TRACE line. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,

/* String form of the enum above, indexed by it. */
static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE] = "rule",
	[NF_IP6_TRACE_COMMENT_RETURN] = "return",
	[NF_IP6_TRACE_COMMENT_POLICY] = "policy",

/* Log parameters used for every TRACE line emitted below. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_MASK,
246 /* Mildly perf critical (only if packet tracing is on) */
/* Walking from @s toward @e, track the current chain name, comment
 * (rule/return/policy) and rule number; helper for trace_packet().
 * NOTE(review): damaged view — the return-type line, rulenum
 * bookkeeping, return statements and closing braces are not visible. */
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		/* An unconditional STANDARD target at target_offset ==
		 * sizeof(entry) is a chain tail (policy or return). */
		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet (skb->nf_trace set).  Only compiled when the TRACE target is
 * enabled.  NOTE(review): damaged view — the "unsigned int hook"
 * parameter line and the loop-break/closing braces are not visible. */
static void trace_packet(const struct sk_buff *skb,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* This CPU's private copy of the ruleset. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Scan from the hook entry until @e is located. */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
310 static inline __pure struct ip6t_entry *
311 ip6t_next_entry(const struct ip6t_entry *entry)
313 return (void *)entry + entry->next_offset;
316 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-filter loop: walk the rules of @table for this hook and
 * return an NF_* verdict for @skb.  Runs under the xt_write_recseq
 * per-cpu sequence so counter readers can synchronize.
 * NOTE(review): damaged view — the "unsigned int hook" parameter, the
 * do { } loop head, origptr/addend declarations, several closing
 * braces and the final return are not visible here. */
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it either. */
	acpar.hotdrop = false;
	acpar.in = state->in;
	acpar.out = state->out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Enter the per-cpu recursive write sequence (counter sync). */
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);

	e = get_entry(table_base, private->hook_entry[hook]);

		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		/* Cheap ip6t_ip6 check first; skip rule on mismatch. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
			e = ip6t_next_entry(e);

		/* Then run each extended match of the rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))

		/* Rule matched: bump its per-cpu byte/packet counters. */
		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, state->in, state->out,
				     table->name, private, e);
		/* NOTE(review): matching #endif not visible in this view. */
		/* Standard target? */
		if (!t->u.kernel.target->target) {

			v = ((struct xt_standard_target *)t)->verdict;
			/* Negative verdict encodes an absolute NF_* value. */
			/* Pop from stack? */
			if (v != XT_RETURN) {
				verdict = (unsigned int)(-v) - 1;
				/* RETURN: pop the jump stack, or fall to the
				 * hook's underflow rule when it is empty. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
						      private->underflow[hook]);
					e = ip6t_next_entry(jumpstack[--*stackptr]);
			/* Jump (not a fall-through, not GOTO): push return
			 * address onto the jump stack. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
				jumpstack[(*stackptr)++] = e;

			e = get_entry(table_base, v);

		/* Extended target: let its handler decide the verdict. */
		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);

#ifdef DEBUG_ALLOW_ALL
456 /* Figures out from what hook each rule can be called: returns 0 if
457 there are loops. Puts hook bitmask in comefrom. */
/* Loop detection / reachability: figure out from which hooks each rule
 * can be reached, recording the hook bitmask in e->comefrom.  Returns
 * 0 when a loop is found (per the comment above the function).
 * Iterative DFS: counters.pcnt temporarily stores back-pointers.
 * NOTE(review): damaged view — return-type line, "for (;;)" head,
 * several condition halves, continue/return statements and closing
 * braces are not visible; comments describe only the visible lines. */
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "on current path":
			 * seeing it again means a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				/* Reject out-of-range negative verdicts. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",

				/* Return: backtrack through the last
				   big jump. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "

					/* Follow the stored back pointer. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					e = (struct ip6t_entry *)
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
					/* Jump target must lie inside blob. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;

				e = (struct ip6t_entry *)
				e->counters.pcnt = pos;
		duprintf("Finished chain %u\n", hook);
/* Undo one match extension: invoke its destructor (if any) and drop
 * the module reference taken at check time.
 * NOTE(review): damaged view — braces and the "par.net = net;"
 * assignment expected here are not visible. */
static void cleanup_match(struct xt_entry_match *m, struct net *net)
	struct xt_mtdtor_param par;

	par.match = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family = NFPROTO_IPV6;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
/* Basic bounds/layout validation of one user-supplied entry: ip6 part
 * sane, target fits between target_offset and next_offset.
 * NOTE(review): damaged view — return-type line, the second half of
 * the target_offset bound check and the return statements are not
 * visible. */
check_entry(const struct ip6t_entry *e, const char *name)
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);

	/* Minimal target header must fit before next_offset. */
	if (e->target_offset + sizeof(struct xt_entry_target) >

	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
/* Run the generic xt_check_match() validation for one already-looked-up
 * match extension.  NOTE(review): damaged view — braces, ret
 * declaration and the error-path/return lines are not visible. */
static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
	const struct ip6t_ip6 *ipv6 = par->entryinfo;

	par->match = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
		duprintf("ip_tables: check failed for `%s'.\n",
/* Look up (possibly module-loading) the match extension named in @m,
 * then validate it via check_match(); drops the module ref on failure.
 * NOTE(review): damaged view — return-type line, the revision argument
 * line of the lookup, IS_ERR test and error labels are not visible. */
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	m->u.kernel.match = match;

	ret = check_match(m, par);

	/* Error path: release the reference taken by the lookup. */
	module_put(m->u.kernel.match->me);
/* Run the generic xt_check_target() validation for this entry's
 * (already-looked-up) target extension.
 * NOTE(review): damaged view — braces, some tgchk_param initializers
 * (e.g. .net/.table/.entryinfo), ret declaration and the return lines
 * are not visible. */
static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
	struct xt_entry_target *t = ip6t_get_target(e);
	struct xt_tgchk_param par = {
		.target = t->u.kernel.target,
		.hook_mask = e->comefrom,
		.family = NFPROTO_IPV6,

	t = ip6t_get_target(e);
	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
/* Fully validate one entry: bounds check, allocate its per-cpu
 * counter, look up and check every match, then the target.  On any
 * failure unwinds everything done so far (goto-cleanup pattern).
 * NOTE(review): damaged view — return-type line, the trailing
 * parameter of the signature, error labels, some braces and the final
 * return are not visible. */
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
	struct xt_entry_target *t;
	struct xt_target *target;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);

	/* pcnt doubles as the per-cpu counter handle here. */
	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))

	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
			goto cleanup_matches;

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	t->u.kernel.target = target;

	ret = check_target(e, net, name);

	/* Error unwind: drop target ref, destroy checked matches,
	 * free the per-cpu counter. */
	module_put(t->u.kernel.target->me);
	xt_ematch_foreach(ematch, e) {
		cleanup_match(ematch, net);
	xt_percpu_counter_free(e->counters.pcnt);
/* An underflow (chain policy) entry must be an unconditional STANDARD
 * target whose verdict decodes to NF_DROP or NF_ACCEPT.
 * NOTE(review): damaged view — braces and the "return false" lines
 * after the two early checks are not visible. */
static bool check_underflow(const struct ip6t_entry *e)
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ipv6))
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
	verdict = ((struct xt_standard_target *)t)->verdict;
	/* Negative verdicts encode -NF_* - 1; decode before comparing. */
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
/* Offset/alignment validation for one entry during translate_table():
 * checks alignment and bounds, records hook entry points and
 * (validated) underflows, and clears the entry's bookkeeping fields.
 * NOTE(review): damaged view — return-type line, the first half of the
 * next_offset size check, continue/return statements and several
 * braces are not visible. */
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
	/* Entry must be large enough for header plus minimal target. */
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
			newinfo->underflow[h] = underflows[h];

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
/* Fully tear down one entry: destroy all matches, then the target
 * (destructor + module ref), then free its per-cpu counter.
 * NOTE(review): damaged view — braces and the "par.net = net;"
 * assignment expected here are not visible. */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
813 /* Checks and translates the user-supplied table segment (held in
/* Validate and translate a whole user-supplied ruleset blob: check
 * every entry's offsets and hooks, verify all hooks are assigned, run
 * loop detection, then run per-entry semantic checks; finally
 * replicate the blob to every other CPU's copy.
 * NOTE(review): damaged view — return-type line, several loop bodies'
 * braces, error returns and the cleanup label are not visible. */
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
	struct ip6t_entry *iter;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;

	duprintf("translate_table: size %u\n", newinfo->size);

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
		/* ERROR targets delimit user chains: each one grows the
		 * jump stack depth. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))

	/* Finally, each sanity check must pass */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);

	/* Failure: undo the entries that passed before the failing one. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		cleanup_entry(iter, net);

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Sum the per-cpu packet/byte counters of every rule into the flat
 * counters[] array, using the xt_recseq seqcount for a consistent
 * snapshot against concurrent writers.
 * NOTE(review): damaged view — return-type line, cpu/i declarations,
 * the "do {" of the seqcount retry loop, bcnt/pcnt reads and closing
 * braces are not visible. */
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
	struct ip6t_entry *iter;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			/* Retry until the counter pair is read atomically. */
				start = read_seqcount_begin(s);
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
/* Allocate a zeroed flat counters array sized for the table and fill
 * it with an atomic snapshot via get_counters().  Caller frees.
 * NOTE(review): damaged view — braces and the final return of the
 * counters pointer are not visible. */
static struct xt_counters *alloc_counters(const struct xt_table *table)
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);
/* Copy the whole ruleset blob to userspace, then patch in the summed
 * counters and replace kernel pointers with match/target names the
 * user ABI expects.
 * NOTE(review): damaged view — return-type line, error paths
 * (-EFAULT), the vfree of counters, m assignment inside the match loop
 * and several braces are not visible. */
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the counters field with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 sizeof(counters[num])) != 0) {

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)

		/* Same for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
1026 #ifdef CONFIG_COMPAT
/* compat: translate a 32-bit standard verdict into the native one,
 * adjusting positive (jump-offset) verdicts by the compat delta.
 * NOTE(review): damaged view — braces and the "if (v > 0)" guard
 * expected before the adjustment are not visible. */
static void compat_standard_from_user(void *dst, const void *src)
	int v = *(compat_int_t *)src;

		v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
/* compat: translate a native standard verdict into the 32-bit one,
 * removing the compat jump delta for positive (offset) verdicts.
 * NOTE(review): damaged view — braces and the "if (cv > 0)" guard
 * expected before the adjustment are not visible. */
static int compat_standard_to_user(void __user *dst, const void *src)
	compat_int_t cv = *(int *)src;

		cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* compat: compute how much smaller this entry is in the 32-bit layout
 * (entry header + each match + target deltas), record the offset
 * mapping, and shrink newinfo's size/hook offsets accordingly.
 * NOTE(review): damaged view — off/i/ret declarations, braces, the
 * early return on xt_compat_add_offset failure and the final return
 * are not visible. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);

	/* Pull every later hook entry / underflow back by the delta. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
/* compat: build an xt_table_info describing the table as a 32-bit
 * process would see it, by accumulating compat_calc_entry() deltas.
 * NOTE(review): damaged view — ret declaration, the -EINVAL return,
 * loop braces, the early return on error and the final return are not
 * visible. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
	struct ip6t_entry *iter;
	void *loc_cpu_entry;

	if (!newinfo || !info)

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/* IP6T_SO_GET_INFO handler: look up the named table (loading its
 * module if needed) and copy an ip6t_getinfo snapshot to userspace;
 * in compat mode sizes/offsets are recomputed for the 32-bit layout.
 * NOTE(review): damaged view — ret/t declarations, -EINVAL/-EFAULT
 * returns, xt_table_unlock/module_put on the success path, and several
 * braces are not visible. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
	char name[XT_TABLE_MAXNAMELEN];

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));

	if (copy_from_user(name, user, sizeof(name)) != 0)

	/* Ensure the user-supplied name is NUL-terminated. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
		xt_compat_lock(AF_INET6);

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

			/* Report compat-adjusted sizes to 32-bit callers. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);

		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)

		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
		xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy all entries (with fixed-up names/counters) to
 * userspace via copy_entries_to_user().
 * NOTE(review): damaged view — return-type line and trailing
 * parameter, ret/t declarations, -EINVAL/-EFAULT returns, the
 * module_put/xt_table_unlock on exit and several braces are not
 * visible. */
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	struct ip6t_get_entries get;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
	/* Total length must be header plus the advertised blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);

		ret = t ? PTR_ERR(t) : -ENOENT;
/* Common table-replacement core for native and compat paths: swap in
 * @newinfo under the xt lock, harvest the old table's counters, clean
 * up and free the old ruleset, and copy the counters to userspace.
 * NOTE(review): damaged view — return-type line, ret/t declarations,
 * the -ENOMEM check after vzalloc, module_get/put bookkeeping bodies,
 * unlock labels and the final returns are not visible. */
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	counters = vzalloc(num_counters * sizeof(struct xt_counters));

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");

 free_newinfo_counters_untrans:
1277 do_replace(struct net *net, const void __user *user, unsigned int len)
1280 struct ip6t_replace tmp;
1281 struct xt_table_info *newinfo;
1282 void *loc_cpu_entry;
1283 struct ip6t_entry *iter;
1285 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1288 /* overflow check */
1289 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1291 if (tmp.num_counters == 0)
1294 tmp.name[sizeof(tmp.name)-1] = 0;
1296 newinfo = xt_alloc_table_info(tmp.size);
1300 /* choose the copy that is on our node/cpu */
1301 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1302 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1308 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1312 duprintf("ip_tables: Translated table\n");
1314 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1315 tmp.num_counters, tmp.counters);
1317 goto free_newinfo_untrans;
1320 free_newinfo_untrans:
1321 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1322 cleanup_entry(iter, net);
1324 xt_free_table_info(newinfo);
/* IP6T_SO_SET_ADD_COUNTERS handler: copy a counters array from user
 * space (native or compat layout) and add it rule-by-rule into the
 * live table's per-cpu counters under the write recseq.
 * NOTE(review): damaged view — return-type line and trailing compat
 * parameter, ret/size/t/name/ptmp declarations, several error returns,
 * i++ bookkeeping, unlock/vfree cleanup and braces are not visible. */
do_add_counters(struct net *net, const void __user *user, unsigned int len,
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const struct xt_table_info *private;
	const void *loc_cpu_entry;
	struct ip6t_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

		/* compat callers send the 32-bit header layout. */
		size = sizeof(struct compat_xt_counters_info);
		size = sizeof(struct xt_counters_info);

	if (copy_from_user(ptmp, user, size) != 0)

#ifdef CONFIG_COMPAT
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
		num_counters = tmp.num_counters;

	/* Total length must match header plus the counters payload. */
	if (len != size + num_counters * sizeof(struct xt_counters))

	paddc = vmalloc(len - size);

	if (copy_from_user(paddc, user + size, len - size) != 0) {

	t = xt_find_table_lock(net, AF_INET6, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;

	private = t->private;
	if (private->number != num_counters) {
		goto unlock_up_free;

	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	addend = xt_write_recseq_begin();
	loc_cpu_entry = private->entries[curcpu];
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
	xt_write_recseq_end(addend);
1421 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace (pointers become
 * compat_uptr_t).  NOTE(review): damaged view — the valid_hooks /
 * num_entries / size / num_counters members and the closing "};" are
 * not visible here. */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
/* compat: serialize one native entry into the 32-bit layout at
 * *dstptr, shrinking matches/target via the xt compat helpers and
 * rewriting target_offset/next_offset to the compat values.
 * NOTE(review): damaged view — return-type line and trailing
 * parameter, ret/i declarations, origsize assignment, -EFAULT returns
 * and some braces are not visible. */
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;

	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
	/* Offsets shrink by however much the conversion saved so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
/* Look up (and pin a module reference on) the xt_match named in a compat
 * entry's match blob, and accumulate the native-vs-compat size delta for
 * this match into *size.  Returns 0 or a negative errno. */
1474 compat_find_calc_match(struct xt_entry_match *m,
1476 		       const struct ip6t_ip6 *ipv6,
1479 	struct xt_match *match;
	/* May load the extension module on demand. */
1481 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1482 				      m->u.user.revision);
1483 	if (IS_ERR(match)) {
1484 		duprintf("compat_check_calc_match: `%s' not found\n",
1486 		return PTR_ERR(match);
	/* Success: record kernel pointer; caller owns the module ref. */
1488 	m->u.kernel.match = match;
1489 	*size += xt_compat_match_offset(match);
/* Drop the module references taken on every match and on the target of a
 * compat entry (the converse of the lookups in
 * check_compat_entry_size_and_hooks).  Does not free the entry itself. */
1493 static void compat_release_entry(struct compat_ip6t_entry *e)
1495 	struct xt_entry_target *t;
1496 	struct xt_entry_match *ematch;
1498 	/* Cleanup all matches */
1499 	xt_ematch_foreach(ematch, e)
1500 		module_put(ematch->u.kernel.match->me);
1501 	t = compat_ip6t_get_target(e);
1502 	module_put(t->u.kernel.target->me);
/* Validate one compat entry from userspace: alignment, minimum size,
 * match/target lookup (taking module refs), and registration of the
 * native-vs-compat offset delta with xt_compat_add_offset().  Also
 * records hook entry/underflow positions that coincide with this entry.
 * On failure the error path below releases any refs already taken. */
1506 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1507 				  struct xt_table_info *newinfo,
1509 				  const unsigned char *base,
1510 				  const unsigned char *limit,
1511 				  const unsigned int *hook_entries,
1512 				  const unsigned int *underflows,
1515 	struct xt_entry_match *ematch;
1516 	struct xt_entry_target *t;
1517 	struct xt_target *target;
1518 	unsigned int entry_offset;
1522 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* NOTE(review): this only bounds-checks the fixed header against
	 * limit, not e->next_offset; upstream later added explicit
	 * next_offset/target_offset bounds checks here — confirm whether
	 * this tree carries those hardening patches. */
1523 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1524 	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1525 		duprintf("Bad offset %p, limit = %p\n", e, limit);
	/* An entry must at least hold its header plus a minimal target. */
1529 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1530 			     sizeof(struct compat_xt_entry_target)) {
1531 		duprintf("checking: element %p size %u\n",
1536 	/* For purposes of check_entry casting the compat entry is fine */
1537 	ret = check_entry((struct ip6t_entry *)e, name);
	/* off accumulates how much larger the native entry will be. */
1541 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1542 	entry_offset = (void *)e - (void *)base;
1544 	xt_ematch_foreach(ematch, e) {
1545 		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
1547 			goto release_matches;
1551 	t = compat_ip6t_get_target(e);
1552 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1553 					t->u.user.revision);
1554 	if (IS_ERR(target)) {
1555 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1557 		ret = PTR_ERR(target);
1558 		goto release_matches;
1560 	t->u.kernel.target = target;
1562 	off += xt_compat_target_offset(target);
	/* Remember this entry's size delta for later offset translation. */
1564 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1568 	/* Check hooks & underflows */
1569 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1570 		if ((unsigned char *)e - base == hook_entries[h])
1571 			newinfo->hook_entry[h] = hook_entries[h];
1572 		if ((unsigned char *)e - base == underflows[h])
1573 			newinfo->underflow[h] = underflows[h];
1576 	/* Clear counters and comefrom */
1577 	memset(&e->counters, 0, sizeof(e->counters));
	/* Error unwind: drop target ref, then every match ref taken above. */
1582 	module_put(t->u.kernel.target->me);
1584 	xt_ematch_foreach(ematch, e) {
1587 		module_put(ematch->u.kernel.match->me);
/* Expand one validated compat entry into a native ip6t_entry at *dstptr:
 * copy the header, convert each match and the target in place, then fix
 * target_offset/next_offset for the larger native layout and shift any
 * hook_entry/underflow offsets that lie past this entry. */
1593 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1594 			    unsigned int *size, const char *name,
1595 			    struct xt_table_info *newinfo, unsigned char *base)
1597 	struct xt_entry_target *t;
1598 	struct ip6t_entry *de;
1599 	unsigned int origsize;
1601 	struct xt_entry_match *ematch;
1605 	de = (struct ip6t_entry *)*dstptr;
1606 	memcpy(de, e, sizeof(struct ip6t_entry));
1607 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1609 	*dstptr += sizeof(struct ip6t_entry);
	/* Native entries are larger, so *size grows here. */
1610 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1612 	xt_ematch_foreach(ematch, e) {
1613 		ret = xt_compat_match_from_user(ematch, dstptr, size);
	/* (origsize - *size) is negative growth: offsets move outward. */
1617 	de->target_offset = e->target_offset - (origsize - *size);
1618 	t = compat_ip6t_get_target(e);
1619 	xt_compat_target_from_user(t, dstptr, size);
1621 	de->next_offset = e->next_offset - (origsize - *size);
	/* Rebase hook/underflow offsets that point beyond this entry. */
1622 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1623 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1624 			newinfo->hook_entry[h] -= origsize - *size;
1625 		if ((unsigned char *)de - base < newinfo->underflow[h])
1626 			newinfo->underflow[h] -= origsize - *size;
/* Second-pass semantic check for an entry translated from compat form:
 * allocate its per-cpu counter, run every match's and the target's
 * ->checkentry hook.  On failure, unwind only the matches already
 * checked, then free the counter. */
1631 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1636 	struct xt_mtchk_param mtpar;
1637 	struct xt_entry_match *ematch;
1639 	e->counters.pcnt = xt_percpu_counter_alloc();
1640 	if (IS_ERR_VALUE(e->counters.pcnt))
1645 	mtpar.entryinfo = &e->ipv6;
1646 	mtpar.hook_mask = e->comefrom;
1647 	mtpar.family   = NFPROTO_IPV6;
1648 	xt_ematch_foreach(ematch, e) {
1649 		ret = check_match(ematch, &mtpar);
1651 			goto cleanup_matches;
1655 	ret = check_target(e, net, name);
1657 		goto cleanup_matches;
	/* Unwind: destroy only the matches whose check already passed. */
1661 	xt_ematch_foreach(ematch, e) {
1664 		cleanup_match(ematch, net);
1667 	xt_percpu_counter_free(e->counters.pcnt);
/* Translate a full compat (32-bit) ruleset blob into a native
 * xt_table_info: pass 1 validates every compat entry and records size
 * deltas under the xt compat lock; pass 2 expands entries into a freshly
 * allocated table, re-runs mark_source_chains and the semantic checks,
 * then duplicates the ruleset to every possible CPU.
 * hook_entries/underflows come straight from 32-bit userspace and are
 * only trusted once matched against an actual entry boundary. */
1673 translate_compat_table(struct net *net,
1675 		       unsigned int valid_hooks,
1676 		       struct xt_table_info **pinfo,
1678 		       unsigned int total_size,
1679 		       unsigned int number,
1680 		       unsigned int *hook_entries,
1681 		       unsigned int *underflows)
1684 	struct xt_table_info *newinfo, *info;
1685 	void *pos, *entry0, *entry1;
1686 	struct compat_ip6t_entry *iter0;
1687 	struct ip6t_entry *iter1;
1694 	info->number = number;
1696 	/* Init all hooks to impossible value. */
1697 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1698 		info->hook_entry[i] = 0xFFFFFFFF;
1699 		info->underflow[i] = 0xFFFFFFFF;
1702 	duprintf("translate_compat_table: size %u\n", info->size);
	/* Pass 1: validate entries and collect compat offset deltas. */
1704 	xt_compat_lock(AF_INET6);
1705 	xt_compat_init_offsets(AF_INET6, number);
1706 	/* Walk through entries, checking offsets. */
1707 	xt_entry_foreach(iter0, entry0, total_size) {
1708 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1710 							entry0 + total_size,
1721 		duprintf("translate_compat_table: %u not %u entries\n",
1726 	/* Check hooks all assigned */
1727 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1728 		/* Only hooks which are valid */
1729 		if (!(valid_hooks & (1 << i)))
		/* 0xFFFFFFFF sentinel means no entry claimed this hook. */
1731 		if (info->hook_entry[i] == 0xFFFFFFFF) {
1732 			duprintf("Invalid hook entry %u %u\n",
1733 				 i, hook_entries[i]);
1736 		if (info->underflow[i] == 0xFFFFFFFF) {
1737 			duprintf("Invalid underflow %u %u\n",
	/* Pass 2: build the native table at the translated (larger) size. */
1744 	newinfo = xt_alloc_table_info(size);
1748 	newinfo->number = number;
1749 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1750 		newinfo->hook_entry[i] = info->hook_entry[i];
1751 		newinfo->underflow[i] = info->underflow[i];
1753 	entry1 = newinfo->entries[raw_smp_processor_id()];
1756 	xt_entry_foreach(iter0, entry0, total_size) {
1757 		ret = compat_copy_entry_from_user(iter0, &pos, &size,
1758 						  name, newinfo, entry1);
	/* Offsets table no longer needed once expansion is done. */
1762 	xt_compat_flush_offsets(AF_INET6);
1763 	xt_compat_unlock(AF_INET6);
1768 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1772 	xt_entry_foreach(iter1, entry1, newinfo->size) {
1773 		ret = compat_check_entry(iter1, net, name);
1777 		if (strcmp(ip6t_get_target(iter1)->u.user.name,
1778 		    XT_ERROR_TARGET) == 0)
1779 			++newinfo->stacksize;
1783 	 * The first i matches need cleanup_entry (calls ->destroy)
1784 	 * because they had called ->check already. The other j-i
1785 	 * entries need only release.
1789 	xt_entry_foreach(iter0, entry0, newinfo->size) {
1794 		compat_release_entry(iter0);
1796 	xt_entry_foreach(iter1, entry1, newinfo->size) {
1799 		cleanup_entry(iter1, net);
1801 	xt_free_table_info(newinfo);
1805 	/* And one copy for every other CPU */
1806 	for_each_possible_cpu(i)
1807 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1808 			memcpy(newinfo->entries[i], entry1, newinfo->size);
1812 	xt_free_table_info(info);
	/* Error paths: free the new table and release every compat ref. */
1816 	xt_free_table_info(newinfo);
1818 	xt_entry_foreach(iter0, entry0, total_size) {
1821 		compat_release_entry(iter0);
1825 	xt_compat_flush_offsets(AF_INET6);
1826 	xt_compat_unlock(AF_INET6);
/* Compat handler for IP6T_SO_SET_REPLACE: copy the 32-bit replace header
 * and ruleset blob from userspace, translate to a native table, then
 * swap it in via __do_replace.  Sanity checks guard against size and
 * counter-count overflows before any allocation. */
1831 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1834 	struct compat_ip6t_replace tmp;
1835 	struct xt_table_info *newinfo;
1836 	void *loc_cpu_entry;
1837 	struct ip6t_entry *iter;
1839 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1842 	/* overflow check */
1843 	if (tmp.size >= INT_MAX / num_possible_cpus())
1845 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1847 	if (tmp.num_counters == 0)
	/* Force NUL-termination of the user-supplied table name. */
1850 	tmp.name[sizeof(tmp.name)-1] = 0;
1852 	newinfo = xt_alloc_table_info(tmp.size);
1856 	/* choose the copy that is on our node/cpu */
1857 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1858 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1864 	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1865 				     &newinfo, &loc_cpu_entry, tmp.size,
1866 				     tmp.num_entries, tmp.hook_entry,
1871 	duprintf("compat_do_replace: Translated table\n");
1873 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1874 			   tmp.num_counters, compat_ptr(tmp.counters));
1876 		goto free_newinfo_untrans;
	/* __do_replace failed after translation: destroy every entry. */
1879  free_newinfo_untrans:
1880 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1881 		cleanup_entry(iter, net);
1883 	xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes REPLACE to the compat translator and
 * ADD_COUNTERS to do_add_counters with compat=1. */
1888 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1893 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1897 	case IP6T_SO_SET_REPLACE:
1898 		ret = compat_do_replace(sock_net(sk), user, len);
1901 	case IP6T_SO_SET_ADD_COUNTERS:
1902 		ret = do_add_counters(sock_net(sk), user, len, 1);
1906 		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout for the IP6T_SO_GET_ENTRIES request/reply:
 * table name in, compat-format entry blob out. */
1913 struct compat_ip6t_get_entries {
1914 	char name[XT_TABLE_MAXNAMELEN];
1916 	struct compat_ip6t_entry entrytable[0];
/* Dump the whole table to 32-bit userspace: snapshot counters, then
 * convert each entry with compat_copy_entry_to_user. */
1920 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1921 			    void __user *userptr)
1923 	struct xt_counters *counters;
1924 	const struct xt_table_info *private = table->private;
1928 	const void *loc_cpu_entry;
1930 	struct ip6t_entry *iter;
	/* alloc_counters aggregates the per-cpu counters into one array. */
1932 	counters = alloc_counters(table);
1933 	if (IS_ERR(counters))
1934 		return PTR_ERR(counters);
1936 	/* choose the copy that is on our node/cpu, ...
1937 	 * This choice is lazy (because current thread is
1938 	 * allowed to migrate to another cpu)
1940 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1943 	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1944 		ret = compat_copy_entry_to_user(iter, &pos,
1945 						&size, counters, i++);
/* Compat IP6T_SO_GET_ENTRIES: validate the user-supplied length against
 * the compat table size (computed via compat_table_info) and stream the
 * converted entries back.  Runs under the xt compat lock because the
 * conversion uses the shared per-family offset table. */
1955 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1959 	struct compat_ip6t_get_entries get;
1962 	if (*len < sizeof(get)) {
1963 		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1967 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
	/* Length must match header + compat-size blob exactly. */
1970 	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1971 		duprintf("compat_get_entries: %u != %zu\n",
1972 			 *len, sizeof(get) + get.size);
1976 	xt_compat_lock(AF_INET6);
1977 	t = xt_find_table_lock(net, AF_INET6, get.name);
1978 	if (!IS_ERR_OR_NULL(t)) {
1979 		const struct xt_table_info *private = t->private;
1980 		struct xt_table_info info;
1981 		duprintf("t->private->number = %u\n", private->number);
1982 		ret = compat_table_info(private, &info);
1983 		if (!ret && get.size == info.size) {
1984 			ret = compat_copy_entries_to_user(private->size,
1985 							  t, uptr->entrytable);
1987 			duprintf("compat_get_entries: I've got %u not %u!\n",
1988 				 private->size, get.size);
1991 		xt_compat_flush_offsets(AF_INET6);
	/* xt_find_table_lock may return NULL or an ERR_PTR. */
1995 		ret = t ? PTR_ERR(t) : -ENOENT;
1997 	xt_compat_unlock(AF_INET6);
2001 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: INFO and ENTRIES get compat-aware
 * handling; everything else (e.g. revision queries, which are
 * layout-identical) falls through to the native handler. */
2004 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2008 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2012 	case IP6T_SO_GET_INFO:
2013 		ret = get_info(sock_net(sk), user, len, 1);
2015 	case IP6T_SO_GET_ENTRIES:
2016 		ret = compat_get_entries(sock_net(sk), user, len);
2019 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher (non-compat): CAP_NET_ADMIN gate, then
 * REPLACE or ADD_COUNTERS with compat=0. */
2026 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2030 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2034 	case IP6T_SO_SET_REPLACE:
2035 		ret = do_replace(sock_net(sk), user, len);
2038 	case IP6T_SO_SET_ADD_COUNTERS:
2039 		ret = do_add_counters(sock_net(sk), user, len, 0);
2043 		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: INFO, ENTRIES, and match/target revision
 * queries (the latter may trigger on-demand module load of "ip6t_%s"). */
2051 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2055 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2059 	case IP6T_SO_GET_INFO:
2060 		ret = get_info(sock_net(sk), user, len, 0);
2063 	case IP6T_SO_GET_ENTRIES:
2064 		ret = get_entries(sock_net(sk), user, len);
2067 	case IP6T_SO_GET_REVISION_MATCH:
2068 	case IP6T_SO_GET_REVISION_TARGET: {
2069 		struct xt_get_revision rev;
2072 		if (*len != sizeof(rev)) {
2076 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
		/* Force NUL-termination of the user-supplied name. */
2080 		rev.name[sizeof(rev.name)-1] = 0;
2082 		if (cmd == IP6T_SO_GET_REVISION_TARGET)
		/* Retry the lookup once after requesting the module. */
2087 		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2090 					"ip6t_%s", rev.name);
2095 		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a built-in table: allocate an xt_table_info sized for the
 * initial ruleset, copy and translate it, then hand it to xt_register_table.
 * Returns the live xt_table or an ERR_PTR; newinfo is freed on failure. */
2102 struct xt_table *ip6t_register_table(struct net *net,
2103 				     const struct xt_table *table,
2104 				     const struct ip6t_replace *repl)
2107 	struct xt_table_info *newinfo;
2108 	struct xt_table_info bootstrap = {0};
2109 	void *loc_cpu_entry;
2110 	struct xt_table *new_table;
2112 	newinfo = xt_alloc_table_info(repl->size);
2118 	/* choose the copy on our node/cpu, but dont care about preemption */
2119 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2120 	memcpy(loc_cpu_entry, repl->entries, repl->size);
2122 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2126 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
2127 	if (IS_ERR(new_table)) {
2128 		ret = PTR_ERR(new_table);
2134 	xt_free_table_info(newinfo);
2136 	return ERR_PTR(ret);
/* Tear down a table: unregister it from x_tables, run cleanup_entry on
 * every rule (releasing match/target resources), drop the extra module
 * reference taken for non-initial entries, and free the table memory. */
2139 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2141 	struct xt_table_info *private;
2142 	void *loc_cpu_entry;
2143 	struct module *table_owner = table->me;
2144 	struct ip6t_entry *iter;
2146 	private = xt_unregister_table(table);
2148 	/* Decrease module usage counts and free resources */
2149 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
2150 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
2151 		cleanup_entry(iter, net);
2152 	if (private->number > private->initial_entries)
2153 		module_put(table_owner);
2154 	xt_free_table_info(private);
2157 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2159 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2160 		      u_int8_t type, u_int8_t code,
	/* Result is XOR'd with the invert flag by the caller's contract;
	 * NOTE(review): the trailing "^ invert" line is missing from this
	 * fragment — confirm against the full source. */
2163 	return (type == test_type && code >= min_code && code <= max_code)
/* xt_match handler for the built-in "icmp6" match: reads the ICMPv6
 * header from the skb and compares type/code against the rule's range.
 * Truncated headers hotdrop the packet (cannot be safely examined). */
2168 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2170 	const struct icmp6hdr *ic;
2171 	struct icmp6hdr _icmph;
2172 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2174 	/* Must not be a fragment. */
2175 	if (par->fragoff != 0)
	/* May copy into _icmph if the header spans skb fragments. */
2178 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2180 		/* We've been asked to examine this packet, and we
2181 		 * can't.  Hence, no choice but to drop.
2183 		duprintf("Dropping evil ICMP tinygram.\n");
2184 		par->hotdrop = true;
2188 	return icmp6_type_code_match(icmpinfo->type,
2191 				     ic->icmp6_type, ic->icmp6_code,
2192 				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
2195 /* Called when user tries to insert an entry of this type. */
2196 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2198 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2200 	/* Must specify no unknown invflags */
2201 	return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2204 /* The built-in targets: standard (NULL) and error. */
2205 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2207 		.name             = XT_STANDARD_TARGET,
		/* Standard target payload is just the verdict int. */
2208 		.targetsize       = sizeof(int),
2209 		.family           = NFPROTO_IPV6,
2210 #ifdef CONFIG_COMPAT
2211 		.compatsize       = sizeof(compat_int_t),
2212 		.compat_from_user = compat_standard_from_user,
2213 		.compat_to_user   = compat_standard_to_user,
2217 		.name             = XT_ERROR_TARGET,
2218 		.target           = ip6t_error,
2219 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
2220 		.family           = NFPROTO_IPV6,
/* setsockopt/getsockopt registration covering the IP6T_* control range,
 * with compat handlers wired in when CONFIG_COMPAT is enabled. */
2224 static struct nf_sockopt_ops ip6t_sockopts = {
2226 	.set_optmin	= IP6T_BASE_CTL,
2227 	.set_optmax	= IP6T_SO_SET_MAX+1,
2228 	.set		= do_ip6t_set_ctl,
2229 #ifdef CONFIG_COMPAT
2230 	.compat_set	= compat_do_ip6t_set_ctl,
2232 	.get_optmin	= IP6T_BASE_CTL,
2233 	.get_optmax	= IP6T_SO_GET_MAX+1,
2234 	.get		= do_ip6t_get_ctl,
2235 #ifdef CONFIG_COMPAT
2236 	.compat_get	= compat_do_ip6t_get_ctl,
2238 	.owner		= THIS_MODULE,
/* Built-in "icmp6" match, restricted to the ICMPv6 protocol. */
2241 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2244 		.match      = icmp6_match,
2245 		.matchsize  = sizeof(struct ip6t_icmp),
2246 		.checkentry = icmp6_checkentry,
2247 		.proto      = IPPROTO_ICMPV6,
2248 		.family     = NFPROTO_IPV6,
/* Per-network-namespace init/teardown: just (de)register the IPv6
 * protocol with the x_tables core. */
2252 static int __net_init ip6_tables_net_init(struct net *net)
2254 	return xt_proto_init(net, NFPROTO_IPV6);
2257 static void __net_exit ip6_tables_net_exit(struct net *net)
2259 	xt_proto_fini(net, NFPROTO_IPV6);
2262 static struct pernet_operations ip6_tables_net_ops = {
2263 	.init = ip6_tables_net_init,
2264 	.exit = ip6_tables_net_exit,
/* Module init: register pernet ops, built-in targets, built-in matches,
 * then the sockopt interface.  The tail labels below unwind each step in
 * reverse order on failure. */
2267 static int __init ip6_tables_init(void)
2271 	ret = register_pernet_subsys(&ip6_tables_net_ops);
2275 	/* No one else will be downing sem now, so we won't sleep */
2276 	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2279 	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2283 	/* Register setsockopt */
2284 	ret = nf_register_sockopt(&ip6t_sockopts);
2288 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	/* Error unwind in reverse registration order. */
2292 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2294 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2296 	unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in the reverse of init order. */
2301 static void __exit ip6_tables_fini(void)
2303 	nf_unregister_sockopt(&ip6t_sockopts);
2305 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2306 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2307 	unregister_pernet_subsys(&ip6_tables_net_ops);
/* Public API used by the per-table modules (ip6table_filter etc.). */
2310 EXPORT_SYMBOL(ip6t_register_table);
2311 EXPORT_SYMBOL(ip6t_unregister_table);
2312 EXPORT_SYMBOL(ip6t_do_table);
2314 module_init(ip6_tables_init);
2315 module_exit(ip6_tables_fini);