#ifndef _X_TABLES_H
#define _X_TABLES_H

#include <linux/netdevice.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:	the match extension
 * @target:	the target extension
 * @matchinfo:	per-match data
 * @targinfo:	per-target data
 * @in:		input netdevice
 * @out:	output netdevice
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number the packet came from
 * @family:	actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:	drop packet if we had inspection problems
 *
 * The network namespace is obtainable using dev_net(in/out).
 */
struct xt_action_param {
	union {
		const struct xt_match *match;
		const struct xt_target *target;
	};
	union {
		const void *matchinfo, *targinfo;
	};
	const struct net_device *in, *out;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	bool hotdrop;
};
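
/*
 * Example: a minimal sketch of how a match callback consumes this
 * structure (illustrative only; "foo_mt", "xt_foo_info" and its
 * "port" field are hypothetical names, not part of this header):
 *
 *	static bool foo_mt(const struct sk_buff *skb,
 *			   struct xt_action_param *par)
 *	{
 *		const struct xt_foo_info *info = par->matchinfo;
 *		const struct udphdr *uh;
 *		struct udphdr _hdr;
 *
 *		if (par->fragoff != 0)
 *			return false;
 *		uh = skb_header_pointer(skb, par->thoff, sizeof(_hdr), &_hdr);
 *		if (uh == NULL) {
 *			par->hotdrop = true;
 *			return false;
 *		}
 *		return ntohs(uh->dest) == info->port;
 *	}
 */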
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table the rule is tried to be inserted into
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
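
/*
 * Example: a checkentry sketch validating against this structure
 * (illustrative only; "foo_mt_check" and "xt_foo_info" are
 * hypothetical):
 *
 *	static int foo_mt_check(const struct xt_mtchk_param *par)
 *	{
 *		const struct xt_foo_info *info = par->matchinfo;
 *
 *		if (par->hook_mask & (1 << NF_INET_POST_ROUTING))
 *			return -EINVAL;
 *		if (info->port == 0)
 *			return -EINVAL;
 *		return 0;
 *	}
 */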
/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};
/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields see above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};
/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};
struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;
	/* Return true or false: return false and set par->hotdrop = true
	   to force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skbs, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);
	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
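
/*
 * Example: how an extension typically fills in a struct xt_match
 * (illustrative sketch; the names are hypothetical):
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name		= "foo",
 *		.revision	= 0,
 *		.family		= NFPROTO_UNSPEC,
 *		.match		= foo_mt,
 *		.checkentry	= foo_mt_check,
 *		.matchsize	= sizeof(struct xt_foo_info),
 *		.me		= THIS_MODULE,
 *	};
 */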
/* Registration hooks for targets. */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;
	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);
	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace alignment differs from the kernel's */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};
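
/*
 * Example: a target callback returns a netfilter verdict (NF_DROP,
 * NF_ACCEPT, ...) or XT_CONTINUE to fall through to the next rule
 * (illustrative sketch; "foo_tg" and "xt_foo_tginfo" are
 * hypothetical):
 *
 *	static unsigned int foo_tg(struct sk_buff *skb,
 *				   const struct xt_action_param *par)
 *	{
 *		const struct xt_foo_tginfo *info = par->targinfo;
 *
 *		if (info->drop)
 *			return NF_DROP;
 *		return XT_CONTINUE;
 *	}
 */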
/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};
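
/*
 * Example: a table declaration modelled on the IPv4 filter table
 * (illustrative sketch):
 *
 *	static const struct xt_table packet_filter = {
 *		.name		= "filter",
 *		.valid_hooks	= (1 << NF_INET_LOCAL_IN) |
 *				  (1 << NF_INET_FORWARD) |
 *				  (1 << NF_INET_LOCAL_OUT),
 *		.me		= THIS_MODULE,
 *		.af		= NFPROTO_IPV4,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 */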
#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	unsigned int __percpu *stackptr;
	void ***jumpstack;

	unsigned char entries[0] __aligned(8);
};
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);
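
/*
 * Example: typical module init/exit pairing for the registration
 * helpers above (illustrative sketch; "foo_mt_reg" is the
 * hypothetical match declared earlier):
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_match(&foo_mt_reg);
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_match(&foo_mt_reg);
 *	}
 *
 *	module_init(foo_mt_init);
 *	module_exit(foo_mt_exit);
 */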
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);
int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);
/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);
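
/*
 * Example: reader side, as in a get_counters()-style loop
 * (illustrative sketch; "counter" points at one rule's counters):
 *
 *	seqcount_t *s = &per_cpu(xt_recseq, cpu);
 *	unsigned int start;
 *	u64 bcnt, pcnt;
 *
 *	do {
 *		start = read_seqcount_begin(s);
 *		bcnt = counter->bcnt;
 *		pcnt = counter->pcnt;
 *	} while (read_seqcount_retry(s, start));
 */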
/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait until the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
	 * We don't check the addend value to avoid a test and conditional
	 * jump, since addend is most likely 1.
	 */
	__this_cpu_add(xt_recseq.sequence, addend);

	return addend;
}
/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}
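
/*
 * Example: writer side, as used around table traversal in
 * ipt_do_table() and friends (illustrative sketch; disabling BHs
 * satisfies rules 1 and 2 above):
 *
 *	unsigned int addend;
 *
 *	local_bh_disable();
 *	addend = xt_write_recseq_begin();
 *	// ... traverse rules, update per-cpu counters ...
 *	xt_write_recseq_end(addend);
 *	local_bh_enable();
 */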
/*
 * This helper is performance critical and must be inlined.
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}
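
/*
 * Example: comparing a packet's input device name against a rule's
 * interface name and mask, as the IPv4 packet matcher does
 * (illustrative sketch; "indev" is the IFNAMSIZ-aligned device name
 * and "ipinfo" a struct ipt_ip rule header):
 *
 *	unsigned long ret;
 *
 *	ret = ifname_compare_aligned(indev, ipinfo->iniface,
 *				     ipinfo->iniface_mask);
 *	// nonzero ret means the masked names differ
 */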
/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
 * real (percpu) counter. On !SMP, it's just the packet count,
 * so nothing needs to be done there.
 *
 * xt_percpu_counter_alloc returns the address of the percpu
 * counter, or 0 on !SMP. We force an alignment of 16 bytes
 * so that bytes/packets share a common cache line.
 *
 * Hence caller must use IS_ERR_VALUE to check for error. This
 * allows us to return 0 for single core systems without forcing
 * callers to deal with SMP vs. NONSMP issues.
 */
static inline u64 xt_percpu_counter_alloc(void)
{
	if (nr_cpu_ids > 1) {
		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
						    sizeof(struct xt_counters));

		if (res == NULL)
			return (u64) -ENOMEM;

		return (__force u64) res;
	}

	return 0;
}
static inline void xt_percpu_counter_free(u64 pcnt)
{
	if (nr_cpu_ids > 1)
		free_percpu((void __percpu *) (unsigned long) pcnt);
}
static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
	if (nr_cpu_ids > 1)
		return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

	return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
	if (nr_cpu_ids > 1)
		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

	return cnt;
}
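
/*
 * Example: allocating, updating and freeing a rule counter with the
 * helpers above (illustrative sketch; "e" is a table entry):
 *
 *	struct xt_counters *counter;
 *
 *	e->counters.pcnt = xt_percpu_counter_alloc();
 *	if (IS_ERR_VALUE(e->counters.pcnt))
 *		return -ENOMEM;
 *
 *	counter = xt_get_this_cpu_counter(&e->counters);
 *	ADD_COUNTER(*counter, skb->len, 1);
 *
 *	xt_percpu_counter_free(e->counters.pcnt);
 */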
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	unsigned char data[0];
};

struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};
/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;		/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};
struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
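
/*
 * Example: COMPAT_XT_ALIGN rounds a size up to the strictest
 * alignment a 32-bit task expects, e.g. when sizing a match's compat
 * image (illustrative sketch; "xt_foo_info" is hypothetical):
 *
 *	unsigned int csize = COMPAT_XT_ALIGN(sizeof(struct xt_foo_info));
 */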
void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */