/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
49 #define dprintf(format, args...)
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
55 #define duprintf(format, args...)
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 return xt_alloc_initial_table(ip6t, IP6T);
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
83 Hence the start of any table is given by get_table() below. */
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
/*
 * NOTE(review): sampled listing — the function header, braces and several
 * return statements are on missing lines; code kept byte-identical,
 * comments only. Tests the non-extension parts of a rule (addresses,
 * interfaces, protocol) against one skb.
 */
88 ip6_packet_match(const struct sk_buff *skb,
91 const struct ip6t_ip6 *ip6info,
92 unsigned int *protoff,
93 int *fragoff, bool *hotdrop)
96 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* FWINV: XOR a test result with its IP6T_INV_* invert flag. */
98 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
100 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
101 &ip6info->src), IP6T_INV_SRCIP) ||
102 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
103 &ip6info->dst), IP6T_INV_DSTIP)) {
104 dprintf("Source or dest mismatch.\n");
106 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
107 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
108 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
109 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
110 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
111 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface comparisons honour the IP6T_INV_VIA_{IN,OUT} invert bits. */
115 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
117 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
118 dprintf("VIA in mismatch (%s vs %s).%s\n",
119 indev, ip6info->iniface,
120 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
124 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
126 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n",
128 outdev, ip6info->outiface,
129 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
133 /* ... might want to do something with class and flowlabel here ... */
135 /* look for the desired protocol header */
136 if((ip6info->flags & IP6T_F_PROTO)) {
138 unsigned short _frag_off;
/* ipv6_find_hdr() walks the extension-header chain; it also yields the
 * fragment offset, which is propagated to the caller via *fragoff. */
140 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
146 *fragoff = _frag_off;
148 dprintf("Packet protocol %hi ?= %s%hi.\n",
150 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
153 if (ip6info->proto == protohdr) {
154 if(ip6info->invflags & IP6T_INV_PROTO) {
160 /* We need match for the '-p all', too! */
161 if ((ip6info->proto != 0) &&
162 !(ip6info->invflags & IP6T_INV_PROTO))
168 /* should be ip6 safe */
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
188 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Translate a byte offset inside a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
203 static const struct ip6t_ip6 uncond;
205 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(): locate the target
 * record at e->target_offset without exposing a mutable pointer. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
214 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/*
 * Lookup tables and default log settings used only by packet tracing
 * (TRACE target). NOTE(review): sampled listing — some delimiter lines
 * (closing braces of these initializers) are on missing lines.
 */
215 /* This cries for unification! */
216 static const char *const hooknames[] = {
217 [NF_INET_PRE_ROUTING] = "PREROUTING",
218 [NF_INET_LOCAL_IN] = "INPUT",
219 [NF_INET_FORWARD] = "FORWARD",
220 [NF_INET_LOCAL_OUT] = "OUTPUT",
221 [NF_INET_POST_ROUTING] = "POSTROUTING",
224 enum nf_ip_trace_comments {
225 NF_IP6_TRACE_COMMENT_RULE,
226 NF_IP6_TRACE_COMMENT_RETURN,
227 NF_IP6_TRACE_COMMENT_POLICY,
230 static const char *const comments[] = {
231 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
232 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
233 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
236 static struct nf_loginfo trace_loginfo = {
237 .type = NF_LOG_TYPE_LOG,
240 .level = LOGLEVEL_WARNING,
241 .logflags = NF_LOG_MASK,
246 /* Mildly perf critical (only if packet tracing is on) */
/*
 * For a traced packet, walk entries from chain head 's' towards matched
 * entry 'e', tracking the current chain name, rule number and comment
 * ("rule"/"return"/"policy"). NOTE(review): sampled listing — the return
 * statements and rulenum updates are on missing lines; code unchanged.
 */
248 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
249 const char *hookname, const char **chainname,
250 const char **comment, unsigned int *rulenum)
252 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
254 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
255 /* Head of user chain: ERROR target with chainname */
256 *chainname = t->target.data;
261 if (s->target_offset == sizeof(struct ip6t_entry) &&
262 strcmp(t->target.u.kernel.target->name,
263 XT_STANDARD_TARGET) == 0 &&
265 unconditional(&s->ipv6)) {
266 /* Tail of chains: STANDARD target (return/policy) */
/* A base chain's tail is its policy; a user chain's tail is a return. */
267 *comment = *chainname == hookname
268 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
269 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * Emit one "TRACE: table:chain:comment:rulenum" log line for a traced skb
 * via nf_log_trace(). NOTE(review): sampled listing — the 'hook' parameter
 * declaration and some braces are on missing lines; code unchanged.
 */
278 static void trace_packet(const struct sk_buff *skb,
280 const struct net_device *in,
281 const struct net_device *out,
282 const char *tablename,
283 const struct xt_table_info *private,
284 const struct ip6t_entry *e)
286 const struct ip6t_entry *root;
287 const char *hookname, *chainname, *comment;
288 const struct ip6t_entry *iter;
289 unsigned int rulenum = 0;
290 struct net *net = dev_net(in ? in : out);
292 root = get_entry(private->entries, private->hook_entry[hook]);
294 hookname = chainname = hooknames[hook];
295 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
/* Walk from the hook's chain head until the matched entry is located. */
297 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
298 if (get_chainname_rulenum(iter, e, hookname,
299 &chainname, &comment, &rulenum) != 0)
302 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
303 "TRACE: %s:%s:%s:%u ",
304 tablename, chainname, comment, rulenum);
308 static inline __pure struct ip6t_entry *
309 ip6t_next_entry(const struct ip6t_entry *entry)
311 return (void *)entry + entry->next_offset;
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core traversal: evaluate the table's rule chain for one packet and
 * return a verdict. NOTE(review): sampled listing — the 'hook' parameter,
 * loop braces, several verdict/exit paths and the final return are on
 * missing lines; code kept byte-identical, comments only.
 */
316 ip6t_do_table(struct sk_buff *skb,
318 const struct nf_hook_state *state,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int *stackptr, origptr, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
333 indev = state->in ? state->in->name : nulldevname;
334 outdev = state->out ? state->out->name : nulldevname;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar.hotdrop = false;
342 acpar.in = state->in;
343 acpar.out = state->out;
344 acpar.family = NFPROTO_IPV6;
345 acpar.hooknum = hook;
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
/* Per-cpu recursive seqcount: lets readers run lock-free vs. counter
 * snapshots; paired with xt_write_recseq_end() below. */
350 addend = xt_write_recseq_begin();
351 private = table->private;
353 * Ensure we load private-> members after we've fetched the base
356 smp_read_barrier_depends();
357 cpu = smp_processor_id();
358 table_base = private->entries;
359 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
360 stackptr = per_cpu_ptr(private->stackptr, cpu);
363 e = get_entry(table_base, private->hook_entry[hook]);
366 const struct xt_entry_target *t;
367 const struct xt_entry_match *ematch;
368 struct xt_counters *counter;
/* Non-extension match failed: fall through to the next rule. */
372 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
373 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
375 e = ip6t_next_entry(e);
379 xt_ematch_foreach(ematch, e) {
380 acpar.match = ematch->u.kernel.match;
381 acpar.matchinfo = ematch->data;
382 if (!acpar.match->match(skb, &acpar))
386 counter = xt_get_this_cpu_counter(&e->counters);
387 ADD_COUNTER(*counter, skb->len, 1);
389 t = ip6t_get_target_c(e);
390 IP_NF_ASSERT(t->u.kernel.target);
392 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
393 /* The packet is traced: log it */
394 if (unlikely(skb->nf_trace))
395 trace_packet(skb, hook, state->in, state->out,
396 table->name, private, e);
398 /* Standard target? */
399 if (!t->u.kernel.target->target) {
402 v = ((struct xt_standard_target *)t)->verdict;
404 /* Pop from stack? */
405 if (v != XT_RETURN) {
/* Negative verdicts encode NF_* values as -(NF_x)-1. */
406 verdict = (unsigned int)(-v) - 1;
409 if (*stackptr <= origptr)
410 e = get_entry(table_base,
411 private->underflow[hook]);
413 e = ip6t_next_entry(jumpstack[--*stackptr]);
/* A jump (not GOTO, not fallthrough) pushes the current rule so the
 * matching RETURN can resume after it. */
416 if (table_base + v != ip6t_next_entry(e) &&
417 !(e->ipv6.flags & IP6T_F_GOTO)) {
418 if (*stackptr >= private->stacksize) {
422 jumpstack[(*stackptr)++] = e;
425 e = get_entry(table_base, v);
429 acpar.target = t->u.kernel.target;
430 acpar.targinfo = t->data;
432 verdict = t->u.kernel.target->target(skb, &acpar);
433 if (verdict == XT_CONTINUE)
434 e = ip6t_next_entry(e);
438 } while (!acpar.hotdrop);
442 xt_write_recseq_end(addend);
445 #ifdef DEBUG_ALLOW_ALL
454 /* Figures out from what hook each rule can be called: returns 0 if
455 there are loops. Puts hook bitmask in comefrom. */
/*
 * NOTE(review): sampled listing — loop braces, 'goto next', returns and
 * several statements are on missing lines; code kept byte-identical.
 * Iterative DFS over the rule graph: counters.pcnt temporarily stores
 * back-pointers (restored to 0 on unwind), comefrom accumulates the set
 * of hooks that can reach each rule; bit NF_INET_NUMHOOKS marks "on the
 * current DFS path" for loop detection.
 */
457 mark_source_chains(const struct xt_table_info *newinfo,
458 unsigned int valid_hooks, void *entry0)
462 /* No recursion; use packet counter to save back ptrs (reset
463 to 0 as we leave), and comefrom to save source hook bitmask */
464 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
465 unsigned int pos = newinfo->hook_entry[hook];
466 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
468 if (!(valid_hooks & (1 << hook)))
471 /* Set initial back pointer. */
472 e->counters.pcnt = pos;
475 const struct xt_standard_target *t
476 = (void *)ip6t_get_target_c(e);
477 int visited = e->comefrom & (1 << hook);
/* Re-entering a rule already on the DFS path => cycle. */
479 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
480 pr_err("iptables: loop hook %u pos %u %08X.\n",
481 hook, pos, e->comefrom);
484 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
486 /* Unconditional return/END. */
487 if ((e->target_offset == sizeof(struct ip6t_entry) &&
488 (strcmp(t->target.u.user.name,
489 XT_STANDARD_TARGET) == 0) &&
491 unconditional(&e->ipv6)) || visited) {
492 unsigned int oldpos, size;
494 if ((strcmp(t->target.u.user.name,
495 XT_STANDARD_TARGET) == 0) &&
496 t->verdict < -NF_MAX_VERDICT - 1) {
497 duprintf("mark_source_chains: bad "
498 "negative verdict (%i)\n",
503 /* Return: backtrack through the last
506 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
507 #ifdef DEBUG_IP_FIREWALL_USER
509 & (1 << NF_INET_NUMHOOKS)) {
510 duprintf("Back unset "
/* Pop the saved back-pointer and clear the scratch counter. */
517 pos = e->counters.pcnt;
518 e->counters.pcnt = 0;
520 /* We're at the start. */
524 e = (struct ip6t_entry *)
526 } while (oldpos == pos + e->next_offset);
529 size = e->next_offset;
530 e = (struct ip6t_entry *)
531 (entry0 + pos + size);
532 e->counters.pcnt = pos;
535 int newpos = t->verdict;
537 if (strcmp(t->target.u.user.name,
538 XT_STANDARD_TARGET) == 0 &&
540 if (newpos > newinfo->size -
541 sizeof(struct ip6t_entry)) {
542 duprintf("mark_source_chains: "
543 "bad verdict (%i)\n",
547 /* This a jump; chase it. */
548 duprintf("Jump rule %u -> %u\n",
551 /* ... this is a fallthru */
552 newpos = pos + e->next_offset;
554 e = (struct ip6t_entry *)
556 e->counters.pcnt = pos;
561 duprintf("Finished chain %u\n", hook);
566 static void cleanup_match(struct xt_entry_match *m, struct net *net)
568 struct xt_mtdtor_param par;
571 par.match = m->u.kernel.match;
572 par.matchinfo = m->data;
573 par.family = NFPROTO_IPV6;
574 if (par.match->destroy != NULL)
575 par.match->destroy(&par);
576 module_put(par.match->me);
580 check_entry(const struct ip6t_entry *e, const char *name)
582 const struct xt_entry_target *t;
584 if (!ip6_checkentry(&e->ipv6)) {
585 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
589 if (e->target_offset + sizeof(struct xt_entry_target) >
593 t = ip6t_get_target_c(e);
594 if (e->target_offset + t->u.target_size > e->next_offset)
600 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
602 const struct ip6t_ip6 *ipv6 = par->entryinfo;
605 par->match = m->u.kernel.match;
606 par->matchinfo = m->data;
608 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
609 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
611 duprintf("ip_tables: check failed for `%s'.\n",
/*
 * Resolve a match extension by name/revision (may load a module), then
 * validate it with check_match(); drops the module ref on failure.
 * NOTE(review): sampled listing — return type line, braces and the
 * error-path label are on missing lines; code unchanged.
 */
619 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
621 struct xt_match *match;
624 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
627 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
628 return PTR_ERR(match);
630 m->u.kernel.match = match;
632 ret = check_match(m, par);
638 module_put(m->u.kernel.match->me);
/*
 * Validate one rule's (already looked-up) target via xt_check_target(),
 * passing the rule's protocol/invflags and reachable-hook mask.
 * NOTE(review): sampled listing — some initializer fields and the return
 * statements are on missing lines; code unchanged.
 */
642 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
644 struct xt_entry_target *t = ip6t_get_target(e);
645 struct xt_tgchk_param par = {
649 .target = t->u.kernel.target,
/* comefrom was filled in by mark_source_chains(). */
651 .hook_mask = e->comefrom,
652 .family = NFPROTO_IPV6,
656 t = ip6t_get_target(e);
657 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
658 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
660 duprintf("ip_tables: check failed for `%s'.\n",
661 t->u.kernel.target->name);
/*
 * Full per-entry validation: allocate the percpu counter, resolve and
 * check every match, resolve and check the target. On failure, unwinds
 * in reverse order (matches checked so far, then the counter).
 * NOTE(review): sampled listing — braces, 'j' counter handling and some
 * error labels are on missing lines; code unchanged.
 */
668 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
671 struct xt_entry_target *t;
672 struct xt_target *target;
675 struct xt_mtchk_param mtpar;
676 struct xt_entry_match *ematch;
678 ret = check_entry(e, name);
682 e->counters.pcnt = xt_percpu_counter_alloc();
683 if (IS_ERR_VALUE(e->counters.pcnt))
689 mtpar.entryinfo = &e->ipv6;
690 mtpar.hook_mask = e->comefrom;
691 mtpar.family = NFPROTO_IPV6;
692 xt_ematch_foreach(ematch, e) {
693 ret = find_check_match(ematch, &mtpar);
695 goto cleanup_matches;
699 t = ip6t_get_target(e);
700 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
702 if (IS_ERR(target)) {
703 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
704 ret = PTR_ERR(target);
705 goto cleanup_matches;
707 t->u.kernel.target = target;
709 ret = check_target(e, net, name);
/* Error unwind: drop target module ref, destroy checked matches,
 * release the percpu counter. */
714 module_put(t->u.kernel.target->me);
716 xt_ematch_foreach(ematch, e) {
719 cleanup_match(ematch, net);
722 xt_percpu_counter_free(e->counters.pcnt);
727 static bool check_underflow(const struct ip6t_entry *e)
729 const struct xt_entry_target *t;
730 unsigned int verdict;
732 if (!unconditional(&e->ipv6))
734 t = ip6t_get_target_c(e);
735 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
737 verdict = ((struct xt_standard_target *)t)->verdict;
738 verdict = -verdict - 1;
739 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * Bounds/alignment-check one entry within the user blob and record any
 * hook entry points and underflows whose offsets land exactly here.
 * NOTE(review): sampled listing — return type, several returns and the
 * e->comefrom reset are on missing lines; code unchanged.
 */
743 check_entry_size_and_hooks(struct ip6t_entry *e,
744 struct xt_table_info *newinfo,
745 const unsigned char *base,
746 const unsigned char *limit,
747 const unsigned int *hook_entries,
748 const unsigned int *underflows,
749 unsigned int valid_hooks)
753 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
754 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
755 duprintf("Bad offset %p\n", e);
760 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
761 duprintf("checking: element %p size %u\n",
766 /* Check hooks & underflows */
767 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
768 if (!(valid_hooks & (1 << h)))
770 if ((unsigned char *)e - base == hook_entries[h])
771 newinfo->hook_entry[h] = hook_entries[h];
772 if ((unsigned char *)e - base == underflows[h]) {
773 if (!check_underflow(e)) {
774 pr_err("Underflows must be unconditional and "
775 "use the STANDARD target with "
779 newinfo->underflow[h] = underflows[h];
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
/*
 * Release everything one rule owns: every match (via cleanup_match),
 * the target (destructor + module ref), and its percpu counter.
 * NOTE(review): sampled listing — braces and par.net assignment are on
 * missing lines; code unchanged.
 */
789 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
791 struct xt_tgdtor_param par;
792 struct xt_entry_target *t;
793 struct xt_entry_match *ematch;
795 /* Cleanup all matches */
796 xt_ematch_foreach(ematch, e)
797 cleanup_match(ematch, net);
798 t = ip6t_get_target(e);
801 par.target = t->u.kernel.target;
802 par.targinfo = t->data;
803 par.family = NFPROTO_IPV6;
804 if (par.target->destroy != NULL)
805 par.target->destroy(&par);
806 module_put(par.target->me);
808 xt_percpu_counter_free(e->counters.pcnt);
811 /* Checks and translates the user-supplied table segment (held in
/*
 * Pipeline: per-entry size/hook checks -> entry count check -> all hooks
 * assigned -> mark_source_chains loop detection -> per-entry semantic
 * checks (find_check_entry), with cleanup of already-checked entries on
 * failure. NOTE(review): sampled listing — braces, returns and the 'i'
 * bookkeeping are on missing lines; code unchanged.
 */
814 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
815 const struct ip6t_replace *repl)
817 struct ip6t_entry *iter;
821 newinfo->size = repl->size;
822 newinfo->number = repl->num_entries;
824 /* Init all hooks to impossible value. */
825 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
826 newinfo->hook_entry[i] = 0xFFFFFFFF;
827 newinfo->underflow[i] = 0xFFFFFFFF;
830 duprintf("translate_table: size %u\n", newinfo->size);
832 /* Walk through entries, checking offsets. */
833 xt_entry_foreach(iter, entry0, newinfo->size) {
834 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
/* Each ERROR target marks a user-chain head; stacksize counts them. */
842 if (strcmp(ip6t_get_target(iter)->u.user.name,
843 XT_ERROR_TARGET) == 0)
844 ++newinfo->stacksize;
847 if (i != repl->num_entries) {
848 duprintf("translate_table: %u not %u entries\n",
849 i, repl->num_entries);
853 /* Check hooks all assigned */
854 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
855 /* Only hooks which are valid */
856 if (!(repl->valid_hooks & (1 << i)))
858 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
859 duprintf("Invalid hook entry %u %u\n",
860 i, repl->hook_entry[i]);
863 if (newinfo->underflow[i] == 0xFFFFFFFF) {
864 duprintf("Invalid underflow %u %u\n",
865 i, repl->underflow[i]);
870 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
873 /* Finally, each sanity check must pass */
875 xt_entry_foreach(iter, entry0, newinfo->size) {
876 ret = find_check_entry(iter, net, repl->name, repl->size);
883 xt_entry_foreach(iter, entry0, newinfo->size) {
886 cleanup_entry(iter, net);
/*
 * Snapshot all per-cpu counters of a table into a flat array, using each
 * cpu's xt_recseq seqcount so writers are never blocked and reads retry
 * on concurrent update. NOTE(review): sampled listing — return type,
 * braces and the bcnt/pcnt reads inside the retry loop are on missing
 * lines; code unchanged.
 */
895 get_counters(const struct xt_table_info *t,
896 struct xt_counters counters[])
898 struct ip6t_entry *iter;
902 for_each_possible_cpu(cpu) {
903 seqcount_t *s = &per_cpu(xt_recseq, cpu);
906 xt_entry_foreach(iter, t->entries, t->size) {
907 struct xt_counters *tmp;
911 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
913 start = read_seqcount_begin(s);
916 } while (read_seqcount_retry(s, start));
918 ADD_COUNTER(counters[i], bcnt, pcnt);
924 static struct xt_counters *alloc_counters(const struct xt_table *table)
926 unsigned int countersize;
927 struct xt_counters *counters;
928 const struct xt_table_info *private = table->private;
930 /* We need atomic snapshot of counters: rest doesn't change
931 (other than comefrom, which userspace doesn't care
933 countersize = sizeof(struct xt_counters) * private->number;
934 counters = vzalloc(countersize);
936 if (counters == NULL)
937 return ERR_PTR(-ENOMEM);
939 get_counters(private, counters);
/*
 * Copy the whole table blob to userspace, then patch each entry in the
 * user buffer: install the snapshotted counters and replace kernel
 * match/target pointers with their user-visible names.
 * NOTE(review): sampled listing — return type, braces, error paths and
 * the final vfree/return are on missing lines; code unchanged.
 */
945 copy_entries_to_user(unsigned int total_size,
946 const struct xt_table *table,
947 void __user *userptr)
949 unsigned int off, num;
950 const struct ip6t_entry *e;
951 struct xt_counters *counters;
952 const struct xt_table_info *private = table->private;
956 counters = alloc_counters(table);
957 if (IS_ERR(counters))
958 return PTR_ERR(counters);
960 loc_cpu_entry = private->entries;
961 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
966 /* FIXME: use iterator macros --RR */
967 /* ... then go back and fix counters and names */
968 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
970 const struct xt_entry_match *m;
971 const struct xt_entry_target *t;
973 e = (struct ip6t_entry *)(loc_cpu_entry + off);
974 if (copy_to_user(userptr + off
975 + offsetof(struct ip6t_entry, counters),
977 sizeof(counters[num])) != 0) {
/* Walk match records between the entry header and target_offset. */
982 for (i = sizeof(struct ip6t_entry);
983 i < e->target_offset;
984 i += m->u.match_size) {
987 if (copy_to_user(userptr + off + i
988 + offsetof(struct xt_entry_match,
990 m->u.kernel.match->name,
991 strlen(m->u.kernel.match->name)+1)
998 t = ip6t_get_target_c(e);
999 if (copy_to_user(userptr + off + e->target_offset
1000 + offsetof(struct xt_entry_target,
1002 t->u.kernel.target->name,
1003 strlen(t->u.kernel.target->name)+1) != 0) {
1014 #ifdef CONFIG_COMPAT
1015 static void compat_standard_from_user(void *dst, const void *src)
1017 int v = *(compat_int_t *)src;
1020 v += xt_compat_calc_jump(AF_INET6, v);
1021 memcpy(dst, &v, sizeof(v));
1024 static int compat_standard_to_user(void __user *dst, const void *src)
1026 compat_int_t cv = *(int *)src;
1029 cv -= xt_compat_calc_jump(AF_INET6, cv);
1030 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/*
 * Compute how much smaller one entry is in 32-bit (compat) layout,
 * register that delta with the xt compat offset table, and shrink any
 * hook/underflow offsets that lie past this entry.
 * NOTE(review): sampled listing — braces and the final return are on
 * missing lines; code unchanged.
 */
1033 static int compat_calc_entry(const struct ip6t_entry *e,
1034 const struct xt_table_info *info,
1035 const void *base, struct xt_table_info *newinfo)
1037 const struct xt_entry_match *ematch;
1038 const struct xt_entry_target *t;
1039 unsigned int entry_offset;
1042 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1043 entry_offset = (void *)e - base;
1044 xt_ematch_foreach(ematch, e)
1045 off += xt_compat_match_offset(ematch->u.kernel.match);
1046 t = ip6t_get_target_c(e);
1047 off += xt_compat_target_offset(t->u.kernel.target);
1048 newinfo->size -= off;
1049 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1053 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1054 if (info->hook_entry[i] &&
1055 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1056 newinfo->hook_entry[i] -= off;
1057 if (info->underflow[i] &&
1058 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1059 newinfo->underflow[i] -= off;
/*
 * Build a compat-sized mirror of a native xt_table_info by running
 * compat_calc_entry over every entry. NOTE(review): sampled listing —
 * braces, an early return and the final return are on missing lines;
 * code unchanged.
 */
1064 static int compat_table_info(const struct xt_table_info *info,
1065 struct xt_table_info *newinfo)
1067 struct ip6t_entry *iter;
1068 void *loc_cpu_entry;
1071 if (!newinfo || !info)
1074 /* we dont care about newinfo->entries */
1075 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1076 newinfo->initial_entries = 0;
1077 loc_cpu_entry = info->entries;
1078 xt_compat_init_offsets(AF_INET6, info->number);
1079 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1080 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * IP6T_SO_GET_INFO handler: look up the named table (loading its module
 * on demand) and copy its metadata (hooks, underflows, sizes) to
 * userspace; under compat, sizes are the 32-bit ones.
 * NOTE(review): sampled listing — braces, error paths and the module_put
 * /unlock epilogue are on missing lines; code unchanged.
 */
1088 static int get_info(struct net *net, void __user *user,
1089 const int *len, int compat)
1091 char name[XT_TABLE_MAXNAMELEN];
1095 if (*len != sizeof(struct ip6t_getinfo)) {
1096 duprintf("length %u != %zu\n", *len,
1097 sizeof(struct ip6t_getinfo));
1101 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL-termination of the user-supplied table name. */
1104 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1105 #ifdef CONFIG_COMPAT
1107 xt_compat_lock(AF_INET6);
1109 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1110 "ip6table_%s", name);
1111 if (!IS_ERR_OR_NULL(t)) {
1112 struct ip6t_getinfo info;
1113 const struct xt_table_info *private = t->private;
1114 #ifdef CONFIG_COMPAT
1115 struct xt_table_info tmp;
1118 ret = compat_table_info(private, &tmp);
1119 xt_compat_flush_offsets(AF_INET6);
1123 memset(&info, 0, sizeof(info));
1124 info.valid_hooks = t->valid_hooks;
1125 memcpy(info.hook_entry, private->hook_entry,
1126 sizeof(info.hook_entry));
1127 memcpy(info.underflow, private->underflow,
1128 sizeof(info.underflow));
1129 info.num_entries = private->number;
1130 info.size = private->size;
1131 strcpy(info.name, name);
1133 if (copy_to_user(user, &info, *len) != 0)
1141 ret = t ? PTR_ERR(t) : -ENOENT;
1142 #ifdef CONFIG_COMPAT
1144 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table, then dump entries via copy_entries_to_user().
 * NOTE(review): sampled listing — return type, braces, error returns and
 * the unlock epilogue are on missing lines; code unchanged.
 */
1150 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1154 struct ip6t_get_entries get;
1157 if (*len < sizeof(get)) {
1158 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1161 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1163 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1164 duprintf("get_entries: %u != %zu\n",
1165 *len, sizeof(get) + get.size);
1169 t = xt_find_table_lock(net, AF_INET6, get.name);
1170 if (!IS_ERR_OR_NULL(t)) {
1171 struct xt_table_info *private = t->private;
1172 duprintf("t->private->number = %u\n", private->number);
/* Size must match exactly; a stale size from userspace is rejected. */
1173 if (get.size == private->size)
1174 ret = copy_entries_to_user(private->size,
1175 t, uptr->entrytable);
1177 duprintf("get_entries: I've got %u not %u!\n",
1178 private->size, get.size);
1184 ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * Common tail of table replacement (native and compat): find the table,
 * swap in newinfo via xt_replace_table(), adjust module refcounts,
 * snapshot the old counters, destroy the old entries and hand the
 * counters to userspace. NOTE(review): sampled listing — return type,
 * braces, module get/put lines and error labels are on missing lines;
 * code unchanged.
 */
1190 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1191 struct xt_table_info *newinfo, unsigned int num_counters,
1192 void __user *counters_ptr)
1196 struct xt_table_info *oldinfo;
1197 struct xt_counters *counters;
1198 struct ip6t_entry *iter;
1201 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1207 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1208 "ip6table_%s", name);
1209 if (IS_ERR_OR_NULL(t)) {
1210 ret = t ? PTR_ERR(t) : -ENOENT;
1211 goto free_newinfo_counters_untrans;
1215 if (valid_hooks != t->valid_hooks) {
1216 duprintf("Valid hook crap: %08X vs %08X\n",
1217 valid_hooks, t->valid_hooks);
1222 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1226 /* Update module usage count based on number of rules */
1227 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1228 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1229 if ((oldinfo->number > oldinfo->initial_entries) ||
1230 (newinfo->number <= oldinfo->initial_entries))
1232 if ((oldinfo->number > oldinfo->initial_entries) &&
1233 (newinfo->number <= oldinfo->initial_entries))
1236 /* Get the old counters, and synchronize with replace */
1237 get_counters(oldinfo, counters);
1239 /* Decrease module usage counts and free resource */
1240 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1241 cleanup_entry(iter, net)
1243 xt_free_table_info(oldinfo);
1244 if (copy_to_user(counters_ptr, counters,
1245 sizeof(struct xt_counters) * num_counters) != 0) {
1246 /* Silent error, can't fail, new table is already in place */
1247 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1256 free_newinfo_counters_untrans:
/*
 * IP6T_SO_SET_REPLACE handler (native layout): copy the ip6t_replace
 * header and blob from userspace, translate_table() it, then hand off
 * to __do_replace(); on failure past translation, undo per-entry state.
 * NOTE(review): sampled listing — return type, braces, several error
 * returns and labels are on missing lines; code unchanged.
 */
1263 do_replace(struct net *net, const void __user *user, unsigned int len)
1266 struct ip6t_replace tmp;
1267 struct xt_table_info *newinfo;
1268 void *loc_cpu_entry;
1269 struct ip6t_entry *iter;
1271 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1274 /* overflow check */
1275 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1277 if (tmp.num_counters == 0)
/* Force NUL-termination of the user-supplied table name. */
1280 tmp.name[sizeof(tmp.name)-1] = 0;
1282 newinfo = xt_alloc_table_info(tmp.size);
1286 loc_cpu_entry = newinfo->entries;
1287 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1293 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1297 duprintf("ip_tables: Translated table\n");
1299 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1300 tmp.num_counters, tmp.counters);
1302 goto free_newinfo_untrans;
1305 free_newinfo_untrans:
1306 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1307 cleanup_entry(iter, net);
1309 xt_free_table_info(newinfo);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: read a (possibly compat-layout)
 * xt_counters_info header plus counter array from userspace and add the
 * deltas onto the live table under the write recseq.
 * NOTE(review): sampled listing — return type, braces, 'compat' flag
 * handling and unlock/free epilogues are on missing lines; code
 * unchanged.
 */
1314 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1318 struct xt_counters_info tmp;
1319 struct xt_counters *paddc;
1320 unsigned int num_counters;
1325 const struct xt_table_info *private;
1327 struct ip6t_entry *iter;
1328 unsigned int addend;
1329 #ifdef CONFIG_COMPAT
1330 struct compat_xt_counters_info compat_tmp;
1334 size = sizeof(struct compat_xt_counters_info);
1339 size = sizeof(struct xt_counters_info);
1342 if (copy_from_user(ptmp, user, size) != 0)
1345 #ifdef CONFIG_COMPAT
1347 num_counters = compat_tmp.num_counters;
1348 name = compat_tmp.name;
1352 num_counters = tmp.num_counters;
/* Total length must be header + exactly num_counters records. */
1356 if (len != size + num_counters * sizeof(struct xt_counters))
1359 paddc = vmalloc(len - size);
1363 if (copy_from_user(paddc, user + size, len - size) != 0) {
1368 t = xt_find_table_lock(net, AF_INET6, name);
1369 if (IS_ERR_OR_NULL(t)) {
1370 ret = t ? PTR_ERR(t) : -ENOENT;
1375 private = t->private;
1376 if (private->number != num_counters) {
1378 goto unlock_up_free;
1382 addend = xt_write_recseq_begin();
1383 xt_entry_foreach(iter, private->entries, private->size) {
1384 struct xt_counters *tmp;
1386 tmp = xt_get_this_cpu_counter(&iter->counters);
1387 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1390 xt_write_recseq_end(addend);
1401 #ifdef CONFIG_COMPAT
/*
 * 32-bit userland layout of struct ip6t_replace (CONFIG_COMPAT):
 * pointers are compat_uptr_t, trailing entries are compat-sized.
 * NOTE(review): sampled listing — several scalar fields and the closing
 * brace are on missing lines; code unchanged.
 */
1402 struct compat_ip6t_replace {
1403 char name[XT_TABLE_MAXNAMELEN];
1407 u32 hook_entry[NF_INET_NUMHOOKS];
1408 u32 underflow[NF_INET_NUMHOOKS];
1410 compat_uptr_t counters; /* struct xt_counters * */
1411 struct compat_ip6t_entry entries[0];
/*
 * Serialize one native entry into a 32-bit user buffer: copy the header
 * and counters, convert each match and the target via the xt compat
 * helpers, then fix up the shrunken target/next offsets in place.
 * NOTE(review): sampled listing — return type, braces and error returns
 * are on missing lines; code unchanged.
 */
1415 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1416 unsigned int *size, struct xt_counters *counters,
1419 struct xt_entry_target *t;
1420 struct compat_ip6t_entry __user *ce;
1421 u_int16_t target_offset, next_offset;
1422 compat_uint_t origsize;
1423 const struct xt_entry_match *ematch;
1427 ce = (struct compat_ip6t_entry __user *)*dstptr;
1428 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1429 copy_to_user(&ce->counters, &counters[i],
1430 sizeof(counters[i])) != 0)
1433 *dstptr += sizeof(struct compat_ip6t_entry);
1434 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1436 xt_ematch_foreach(ematch, e) {
1437 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets shrink by however much the entry shrank so far. */
1441 target_offset = e->target_offset - (origsize - *size);
1442 t = ip6t_get_target(e);
1443 ret = xt_compat_target_to_user(t, dstptr, size);
1446 next_offset = e->next_offset - (origsize - *size);
1447 if (put_user(target_offset, &ce->target_offset) != 0 ||
1448 put_user(next_offset, &ce->next_offset) != 0)
/*
 * Resolve a match extension for a compat entry (may load a module) and
 * accumulate its native-vs-compat size delta into *size.
 * NOTE(review): sampled listing — return type, some parameters, braces
 * and the final return are on missing lines; code unchanged.
 */
1454 compat_find_calc_match(struct xt_entry_match *m,
1456 const struct ip6t_ip6 *ipv6,
1459 struct xt_match *match;
1461 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1462 m->u.user.revision);
1463 if (IS_ERR(match)) {
1464 duprintf("compat_check_calc_match: `%s' not found\n",
1466 return PTR_ERR(match);
1468 m->u.kernel.match = match;
1469 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken on every match and on the target of
 * a compat entry that was size-checked but never ->check()ed.
 * Counterpart to the lookups in compat_find_calc_match() /
 * check_compat_entry_size_and_hooks().
 */
static void compat_release_entry(struct compat_ip6t_entry *e)
1475 struct xt_entry_target *t;
1476 struct xt_entry_match *ematch;
1478 /* Cleanup all matches */
1479 xt_ematch_foreach(ematch, e)
1480 module_put(ematch->u.kernel.match->me);
1481 t = compat_ip6t_get_target(e);
1482 module_put(t->u.kernel.target->me);
/*
 * First-pass validation of one compat entry: alignment and bounds
 * checks, match/target lookup (taking module refs), recording of the
 * compat->native size delta with xt_compat_add_offset(), and hook
 * entry/underflow bookkeeping in newinfo.
 * On failure the already-pinned matches/target are released via the
 * unwind code at the bottom (labels elided by the extraction).
 * NOTE(review): listing is incomplete -- return statements, labels and
 * several error branches are elided; confirm against the full file.
 */
1486 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1487 struct xt_table_info *newinfo,
1489 const unsigned char *base,
1490 const unsigned char *limit,
1491 const unsigned int *hook_entries,
1492 const unsigned int *underflows,
1495 struct xt_entry_match *ematch;
1496 struct xt_entry_target *t;
1497 struct xt_target *target;
1498 unsigned int entry_offset;
1502 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be properly aligned and leave room before the limit. */
1503 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1504 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1505 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover entry header + a minimal target. */
1509 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1510 sizeof(struct compat_xt_entry_target)) {
1511 duprintf("checking: element %p size %u\n",
1516 /* For purposes of check_entry casting the compat entry is fine */
1517 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates the total native-vs-compat growth of this entry. */
1521 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1522 entry_offset = (void *)e - (void *)base;
1524 xt_ematch_foreach(ematch, e) {
1525 ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
1527 goto release_matches;
1531 t = compat_ip6t_get_target(e);
1532 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1533 t->u.user.revision);
1534 if (IS_ERR(target)) {
1535 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1537 ret = PTR_ERR(target);
1538 goto release_matches;
1540 t->u.kernel.target = target;
1542 off += xt_compat_target_offset(target);
/* Remember the per-entry delta for later offset translation. */
1544 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1548 /* Check hooks & underflows */
1549 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1550 if ((unsigned char *)e - base == hook_entries[h])
1551 newinfo->hook_entry[h] = hook_entries[h];
1552 if ((unsigned char *)e - base == underflows[h])
1553 newinfo->underflow[h] = underflows[h];
1556 /* Clear counters and comefrom */
1557 memset(&e->counters, 0, sizeof(e->counters));
/* --- error unwind: drop target ref, then refs of matched entries --- */
1562 module_put(t->u.kernel.target->me);
1564 xt_ematch_foreach(ematch, e) {
1567 module_put(ematch->u.kernel.match->me);
/*
 * Expand one validated compat entry into native ip6t_entry layout at
 * *dstptr, converting matches and target, fixing target_offset /
 * next_offset, and shifting any hook_entry/underflow values that lie
 * beyond this entry by the amount the table grew.
 * NOTE(review): listing is incomplete -- origsize initialisation,
 * return and closing brace are elided by the extraction.
 */
1573 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1574 unsigned int *size, const char *name,
1575 struct xt_table_info *newinfo, unsigned char *base)
1577 struct xt_entry_target *t;
1578 struct ip6t_entry *de;
1579 unsigned int origsize;
1581 struct xt_entry_match *ematch;
1585 de = (struct ip6t_entry *)*dstptr;
1586 memcpy(de, e, sizeof(struct ip6t_entry));
1587 memcpy(&de->counters, &e->counters, sizeof(e->counters));
/* Native header is larger than compat; *size grows accordingly. */
1589 *dstptr += sizeof(struct ip6t_entry);
1590 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1592 xt_ematch_foreach(ematch, e) {
1593 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* (origsize - *size) is negative growth; offsets move outward. */
1597 de->target_offset = e->target_offset - (origsize - *size);
1598 t = compat_ip6t_get_target(e);
1599 xt_compat_target_from_user(t, dstptr, size);
1601 de->next_offset = e->next_offset - (origsize - *size);
/* Entries after this one pushed hooks forward; adjust them. */
1602 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1603 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1604 newinfo->hook_entry[h] -= origsize - *size;
1605 if ((unsigned char *)de - base < newinfo->underflow[h])
1606 newinfo->underflow[h] -= origsize - *size;
/*
 * Second-pass check of an already-expanded (native-layout) entry:
 * allocate its per-cpu counter, then run every match's and the
 * target's ->checkentry hook.  On failure, unwind the matches checked
 * so far and free the counter.
 * NOTE(review): listing is incomplete -- early return, loop-limit
 * bookkeeping and closing brace are elided by the extraction.
 */
1611 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1616 struct xt_mtchk_param mtpar;
1617 struct xt_entry_match *ematch;
1619 e->counters.pcnt = xt_percpu_counter_alloc();
1620 if (IS_ERR_VALUE(e->counters.pcnt))
1625 mtpar.entryinfo = &e->ipv6;
1626 mtpar.hook_mask = e->comefrom;
1627 mtpar.family = NFPROTO_IPV6;
1628 xt_ematch_foreach(ematch, e) {
1629 ret = check_match(ematch, &mtpar);
1631 goto cleanup_matches;
1635 ret = check_target(e, net, name);
1637 goto cleanup_matches;
/* --- error unwind: destroy matches already checked, free counter --- */
1641 xt_ematch_foreach(ematch, e) {
1644 cleanup_match(ematch, net);
1647 xt_percpu_counter_free(e->counters.pcnt);
/*
 * Convert a whole compat-layout ruleset (entry0/total_size) into a
 * fresh native xt_table_info.  Phases:
 *   1. under xt_compat_lock: size/hook-check every compat entry,
 *      recording per-entry size deltas;
 *   2. verify entry count and that all valid hooks got assigned;
 *   3. allocate the native table and expand each entry into it;
 *   4. mark_source_chains + per-entry compat_check_entry.
 * Error paths unwind partially-checked entries (cleanup_entry for the
 * first i that passed ->check, compat_release_entry for the rest).
 * NOTE(review): listing is incomplete -- allocation of `info`,
 * several error labels/returns and the success return are elided.
 */
1653 translate_compat_table(struct net *net,
1655 unsigned int valid_hooks,
1656 struct xt_table_info **pinfo,
1658 unsigned int total_size,
1659 unsigned int number,
1660 unsigned int *hook_entries,
1661 unsigned int *underflows)
1664 struct xt_table_info *newinfo, *info;
1665 void *pos, *entry0, *entry1;
1666 struct compat_ip6t_entry *iter0;
1667 struct ip6t_entry *iter1;
1674 info->number = number;
1676 /* Init all hooks to impossible value. */
1677 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1678 info->hook_entry[i] = 0xFFFFFFFF;
1679 info->underflow[i] = 0xFFFFFFFF;
1682 duprintf("translate_compat_table: size %u\n", info->size);
1684 xt_compat_lock(AF_INET6);
1685 xt_compat_init_offsets(AF_INET6, number);
1686 /* Walk through entries, checking offsets. */
1687 xt_entry_foreach(iter0, entry0, total_size) {
1688 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1690 entry0 + total_size,
/* Entry count must match what userland claimed in the header. */
1701 duprintf("translate_compat_table: %u not %u entries\n",
1706 /* Check hooks all assigned */
1707 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1708 /* Only hooks which are valid */
1709 if (!(valid_hooks & (1 << i)))
1711 if (info->hook_entry[i] == 0xFFFFFFFF) {
1712 duprintf("Invalid hook entry %u %u\n",
1713 i, hook_entries[i]);
1716 if (info->underflow[i] == 0xFFFFFFFF) {
1717 duprintf("Invalid underflow %u %u\n",
/* Phase 3: allocate the native-sized table and expand entries. */
1724 newinfo = xt_alloc_table_info(size);
1728 newinfo->number = number;
1729 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1730 newinfo->hook_entry[i] = info->hook_entry[i];
1731 newinfo->underflow[i] = info->underflow[i];
1733 entry1 = newinfo->entries;
1736 xt_entry_foreach(iter0, entry0, total_size) {
1737 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1738 name, newinfo, entry1);
/* compat offsets no longer needed once everything is native. */
1742 xt_compat_flush_offsets(AF_INET6);
1743 xt_compat_unlock(AF_INET6);
1748 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1752 xt_entry_foreach(iter1, entry1, newinfo->size) {
1753 ret = compat_check_entry(iter1, net, name);
1757 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1758 XT_ERROR_TARGET) == 0)
1759 ++newinfo->stacksize;
1763 * The first i matches need cleanup_entry (calls ->destroy)
1764 * because they had called ->check already. The other j-i
1765 * entries need only release.
1769 xt_entry_foreach(iter0, entry0, newinfo->size) {
1774 compat_release_entry(iter0);
1776 xt_entry_foreach(iter1, entry1, newinfo->size) {
1779 cleanup_entry(iter1, net);
1781 xt_free_table_info(newinfo);
1787 xt_free_table_info(info);
/* --- earlier-phase error unwind: still under xt_compat_lock --- */
1791 xt_free_table_info(newinfo);
1793 xt_entry_foreach(iter0, entry0, total_size) {
1796 compat_release_entry(iter0);
1800 xt_compat_flush_offsets(AF_INET6);
1801 xt_compat_unlock(AF_INET6);
/*
 * Handle IP6T_SO_SET_REPLACE from a 32-bit process: copy in the compat
 * header and rule blob, sanity-check sizes, translate to native layout
 * via translate_compat_table(), then swap the table in with
 * __do_replace().  On failure after translation, every entry must be
 * cleaned up before the table info is freed.
 * NOTE(review): listing is incomplete -- several returns/gotos and the
 * closing brace are elided by the extraction.
 */
1806 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1809 struct compat_ip6t_replace tmp;
1810 struct xt_table_info *newinfo;
1811 void *loc_cpu_entry;
1812 struct ip6t_entry *iter;
1814 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1817 /* overflow check */
1818 if (tmp.size >= INT_MAX / num_possible_cpus())
1820 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1822 if (tmp.num_counters == 0)
/* Defensive NUL-termination of the user-supplied table name. */
1825 tmp.name[sizeof(tmp.name)-1] = 0;
1827 newinfo = xt_alloc_table_info(tmp.size);
1831 loc_cpu_entry = newinfo->entries;
1832 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1838 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1839 &newinfo, &loc_cpu_entry, tmp.size,
1840 tmp.num_entries, tmp.hook_entry,
1845 duprintf("compat_do_replace: Translated table\n");
1847 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1848 tmp.num_counters, compat_ptr(tmp.counters));
1850 goto free_newinfo_untrans;
1853 free_newinfo_untrans:
1854 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1855 cleanup_entry(iter, net);
1857 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes REPLACE to the compat path and
 * ADD_COUNTERS to the shared do_add_counters() (compat flag = 1).
 * NOTE(review): switch braces/returns elided by the extraction.
 */
1862 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1867 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1871 case IP6T_SO_SET_REPLACE:
1872 ret = compat_do_replace(sock_net(sk), user, len);
1875 case IP6T_SO_SET_ADD_COUNTERS:
1876 ret = do_add_counters(sock_net(sk), user, len, 1);
1880 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name,
 * (size field elided in this listing), then the rule blob.
 */
1887 struct compat_ip6t_get_entries {
1888 char name[XT_TABLE_MAXNAMELEN];
1890 struct compat_ip6t_entry entrytable[0];
/*
 * Dump all entries of a table to a 32-bit userland buffer: snapshot the
 * counters, then convert each entry with compat_copy_entry_to_user().
 * NOTE(review): listing is incomplete -- pos/size initialisation,
 * counter vfree and return are elided by the extraction.
 */
1894 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1895 void __user *userptr)
1897 struct xt_counters *counters;
1898 const struct xt_table_info *private = table->private;
1903 struct ip6t_entry *iter;
1905 counters = alloc_counters(table);
1906 if (IS_ERR(counters))
1907 return PTR_ERR(counters);
1911 xt_entry_foreach(iter, private->entries, total_size) {
1912 ret = compat_copy_entry_to_user(iter, &pos,
1913 &size, counters, i++);
/*
 * Handle IP6T_SO_GET_ENTRIES from 32-bit userland: validate *len
 * against the requested size, look the table up under xt_compat_lock,
 * verify the caller's size matches the compat-converted table size,
 * and dump the entries.  Offsets recorded by compat_table_info() are
 * flushed before unlocking.
 * NOTE(review): listing is incomplete -- returns, module_put and
 * closing braces are elided by the extraction.
 */
1923 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1927 struct compat_ip6t_get_entries get;
1930 if (*len < sizeof(get)) {
1931 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1935 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1938 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1939 duprintf("compat_get_entries: %u != %zu\n",
1940 *len, sizeof(get) + get.size);
1944 xt_compat_lock(AF_INET6);
1945 t = xt_find_table_lock(net, AF_INET6, get.name);
1946 if (!IS_ERR_OR_NULL(t)) {
1947 const struct xt_table_info *private = t->private;
1948 struct xt_table_info info;
1949 duprintf("t->private->number = %u\n", private->number);
1950 ret = compat_table_info(private, &info);
1951 if (!ret && get.size == info.size) {
1952 ret = compat_copy_entries_to_user(private->size,
1953 t, uptr->entrytable);
1955 duprintf("compat_get_entries: I've got %u not %u!\n",
1956 private->size, get.size);
1959 xt_compat_flush_offsets(AF_INET6);
/* t may be an ERR_PTR from xt_find_table_lock, or NULL (no table). */
1963 ret = t ? PTR_ERR(t) : -ENOENT;
1965 xt_compat_unlock(AF_INET6);
1969 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO/GET_ENTRIES take the compat
 * path; anything else falls through to the native do_ip6t_get_ctl()
 * (forward-declared above).  Requires CAP_NET_ADMIN.
 * NOTE(review): switch braces/returns elided by the extraction.
 */
1972 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1976 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1980 case IP6T_SO_GET_INFO:
1981 ret = get_info(sock_net(sk), user, len, 1);
1983 case IP6T_SO_GET_ENTRIES:
1984 ret = compat_get_entries(sock_net(sk), user, len);
1987 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: REPLACE and ADD_COUNTERS (compat flag
 * = 0).  Mirrors compat_do_ip6t_set_ctl() above.  Requires
 * CAP_NET_ADMIN in the socket's user namespace.
 * NOTE(review): switch braces/returns elided by the extraction.
 */
1994 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1998 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2002 case IP6T_SO_SET_REPLACE:
2003 ret = do_replace(sock_net(sk), user, len);
2006 case IP6T_SO_SET_ADD_COUNTERS:
2007 ret = do_add_counters(sock_net(sk), user, len, 0);
2011 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: GET_INFO, GET_ENTRIES, and the
 * match/target revision queries (which may demand-load an "ip6t_%s"
 * module via try_then_request_module()).  Requires CAP_NET_ADMIN.
 * NOTE(review): listing is incomplete -- several returns/breaks and
 * the closing braces are elided by the extraction.
 */
2019 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2023 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2027 case IP6T_SO_GET_INFO:
2028 ret = get_info(sock_net(sk), user, len, 0);
2031 case IP6T_SO_GET_ENTRIES:
2032 ret = get_entries(sock_net(sk), user, len);
2035 case IP6T_SO_GET_REVISION_MATCH:
2036 case IP6T_SO_GET_REVISION_TARGET: {
2037 struct xt_get_revision rev;
2040 if (*len != sizeof(rev)) {
2044 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Defensive NUL-termination of the user-supplied name. */
2048 rev.name[sizeof(rev.name)-1] = 0;
2050 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2055 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2058 "ip6t_%s", rev.name);
2063 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new ip6tables table for a netns: allocate table info of
 * the replacement's size, copy in the initial ruleset, translate it,
 * and hand it to xt_register_table().  Returns the registered table or
 * an ERR_PTR; newinfo is freed on every failure path.
 * NOTE(review): some early-return/error-goto lines are elided by the
 * extraction.
 */
struct xt_table *ip6t_register_table(struct net *net,
2071 const struct xt_table *table,
2072 const struct ip6t_replace *repl)
2075 struct xt_table_info *newinfo;
2076 struct xt_table_info bootstrap = {0};
2077 void *loc_cpu_entry;
2078 struct xt_table *new_table;
2080 newinfo = xt_alloc_table_info(repl->size);
2086 loc_cpu_entry = newinfo->entries;
2087 memcpy(loc_cpu_entry, repl->entries, repl->size);
2089 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2093 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2094 if (IS_ERR(new_table)) {
2095 ret = PTR_ERR(new_table);
2101 xt_free_table_info(newinfo);
2103 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister it from x_tables, run
 * cleanup_entry() on every rule (dropping match/target module refs and
 * counters), release the table-owner module ref taken for non-initial
 * rulesets, and free the table info.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
2108 struct xt_table_info *private;
2109 void *loc_cpu_entry;
/* table->me must be read before xt_unregister_table() drops it. */
2110 struct module *table_owner = table->me;
2111 struct ip6t_entry *iter;
2113 private = xt_unregister_table(table);
2115 /* Decrease module usage counts and free resources */
2116 loc_cpu_entry = private->entries;
2117 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2118 cleanup_entry(iter, net);
2119 if (private->number > private->initial_entries)
2120 module_put(table_owner);
2121 xt_free_table_info(private);
2124 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/*
 * NOTE(review): the return-type line, an `invert` parameter and the
 * XOR with it appear to be elided by this extraction -- the visible
 * expression is only the positive half of the match; confirm against
 * the full file.
 */
2126 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2127 u_int8_t type, u_int8_t code,
2130 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match ->match() for the built-in "icmp6" match: refuses fragments,
 * hot-drops packets too short to carry an ICMPv6 header (deliberately,
 * since they cannot be examined), and otherwise compares type/code
 * against the rule's range, honouring IP6T_ICMP_INV.
 * NOTE(review): the skb_header_pointer NULL check and some returns are
 * elided by the extraction.
 */
2135 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2137 const struct icmp6hdr *ic;
2138 struct icmp6hdr _icmph;
2139 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2141 /* Must not be a fragment. */
2142 if (par->fragoff != 0)
2145 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2147 /* We've been asked to examine this packet, and we
2148 * can't. Hence, no choice but to drop.
2150 duprintf("Dropping evil ICMP tinygram.\n");
2151 par->hotdrop = true;
2155 return icmp6_type_code_match(icmpinfo->type,
2158 ic->icmp6_type, ic->icmp6_code,
2159 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2162 /* Called when user tries to insert an entry of this type. */
/* Reject rules carrying any invflag bit other than IP6T_ICMP_INV. */
2163 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2165 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2167 /* Must specify no unknown invflags */
2168 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2171 /* The built-in targets: standard (NULL) and error. */
/*
 * STANDARD carries a verdict int (with compat conversion helpers);
 * ERROR (target fn ip6t_error) marks user-defined chain heads.
 * NOTE(review): closing braces/#endif of the array elided by the
 * extraction.
 */
2172 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2174 .name = XT_STANDARD_TARGET,
2175 .targetsize = sizeof(int),
2176 .family = NFPROTO_IPV6,
2177 #ifdef CONFIG_COMPAT
2178 .compatsize = sizeof(compat_int_t),
2179 .compat_from_user = compat_standard_from_user,
2180 .compat_to_user = compat_standard_to_user,
2184 .name = XT_ERROR_TARGET,
2185 .target = ip6t_error,
2186 .targetsize = XT_FUNCTION_MAXNAMELEN,
2187 .family = NFPROTO_IPV6,
/*
 * Socket-option registration: wires the [IP6T_BASE_CTL, *_MAX] set/get
 * ranges to the dispatchers above, with compat variants under
 * CONFIG_COMPAT.
 */
2191 static struct nf_sockopt_ops ip6t_sockopts = {
2193 .set_optmin = IP6T_BASE_CTL,
2194 .set_optmax = IP6T_SO_SET_MAX+1,
2195 .set = do_ip6t_set_ctl,
2196 #ifdef CONFIG_COMPAT
2197 .compat_set = compat_do_ip6t_set_ctl,
2199 .get_optmin = IP6T_BASE_CTL,
2200 .get_optmax = IP6T_SO_GET_MAX+1,
2201 .get = do_ip6t_get_ctl,
2202 #ifdef CONFIG_COMPAT
2203 .compat_get = compat_do_ip6t_get_ctl,
2205 .owner = THIS_MODULE,
/*
 * Built-in match table: the "icmp6" match, restricted to
 * IPPROTO_ICMPV6 packets.  (.name line elided by the extraction.)
 */
2208 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2211 .match = icmp6_match,
2212 .matchsize = sizeof(struct ip6t_icmp),
2213 .checkentry = icmp6_checkentry,
2214 .proto = IPPROTO_ICMPV6,
2215 .family = NFPROTO_IPV6,
/* Per-netns init: set up the x_tables proc/state for IPv6. */
2219 static int __net_init ip6_tables_net_init(struct net *net)
2221 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: undo ip6_tables_net_init(). */
2224 static void __net_exit ip6_tables_net_exit(struct net *net)
2226 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks the per-netns init/exit pair into the pernet subsystem. */
2229 static struct pernet_operations ip6_tables_net_ops = {
2230 .init = ip6_tables_net_init,
2231 .exit = ip6_tables_net_exit,
/*
 * Module init: register pernet ops, built-in targets, built-in
 * matches, then the sockopt interface -- unwinding in reverse order on
 * any failure (labels elided by this extraction).
 */
2234 static int __init ip6_tables_init(void)
2238 ret = register_pernet_subsys(&ip6_tables_net_ops);
2242 /* No one else will be downing sem now, so we won't sleep */
2243 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2246 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2250 /* Register setsockopt */
2251 ret = nf_register_sockopt(&ip6t_sockopts);
2255 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* --- error unwind, reverse order of registration --- */
2259 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2261 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2263 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister everything in reverse order of init. */
2268 static void __exit ip6_tables_fini(void)
2270 nf_unregister_sockopt(&ip6t_sockopts);
2272 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2273 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2274 unregister_pernet_subsys(&ip6_tables_net_ops);
2277 EXPORT_SYMBOL(ip6t_register_table);
2278 EXPORT_SYMBOL(ip6t_unregister_table);
2279 EXPORT_SYMBOL(ip6t_do_table);
2281 module_init(ip6_tables_init);
2282 module_exit(ip6_tables_fini);