/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8
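
/* Illustrative sketch, not part of this header: at a BPF_CALL site the
 * first five registers carry the arguments and R0 carries the result,
 * i.e. conceptually:
 *
 *	R0 = func(R1, R2, R3, R4, R5);	// R1 == BPF_REG_ARG1, ...
 */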

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512
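
/* Usage sketch, illustrative only: the stack grows down from the frame
 * pointer, so slots live at negative offsets from BPF_REG_FP within
 * the MAX_BPF_STACK window:
 *
 *	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0)	// *(u64 *)(fp - 8) = 0
 */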

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST, .src_reg = 0,			\
		.off = 0, .imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST, .src_reg = 0,			\
		.off = 0, .imm = IMM })
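
/* Usage sketch, illustrative only:
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4)		// r0 += 4
 *	BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_7)	// (u32)r0 &= (u32)r7
 */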

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST, .src_reg = 0,			\
		.off = 0, .imm = LEN })
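
/* Usage sketch, illustrative only: convert the low 16 bits of r0 to
 * big endian (a no-op when the CPU is already big endian):
 *
 *	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16)	// r0 = cpu_to_be16(r0)
 */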

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST, .src_reg = 0,			\
		.off = 0, .imm = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST, .src_reg = 0,			\
		.off = 0, .imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0,					\
		.imm = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code = 0, /* zero is reserved opcode */	\
		.dst_reg = 0, .src_reg = 0,			\
		.off = 0,					\
		.imm = ((__u64) (IMM)) >> 32 })
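
/* Usage sketch, illustrative only: the pseudo instruction occupies two
 * struct bpf_insn slots, low 32 bits of the immediate first:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL),
 *		BPF_EXIT_INSN(),
 *	};	// ARRAY_SIZE(prog) == 3, not 2
 */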

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = 0, .imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0, .src_reg = 0,			\
		.off = 0, .imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0, .src_reg = SRC,			\
		.off = 0, .imm = IMM })
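
/* Usage sketch, illustrative only: load the EtherType halfword at
 * offset 12 of the linear packet data into R0:
 *
 *	BPF_LD_ABS(BPF_H, 12)	// R0 = ntohs(*(u16 *)(skb->data + 12))
 */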

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = OFF, .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = OFF, .imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST, .src_reg = 0,			\
		.off = OFF, .imm = IMM })
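
/* Usage sketch, illustrative only: spill r1 to the stack and reload it:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, -8),	// *(fp - 8) = r1
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, -8),	// r1 = *(fp - 8)
 */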

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = OFF, .imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST, .src_reg = 0,			\
		.off = OFF, .imm = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0, .src_reg = 0,			\
		.off = 0,					\
		.imm = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code = CODE,					\
		.dst_reg = DST, .src_reg = SRC,			\
		.off = OFF, .imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0, .src_reg = 0,			\
		.off = 0, .imm = 0 })
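
/* Usage sketch, illustrative only: a complete sequence returning 1 if
 * the first argument is zero and 0 otherwise:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),	// skip next insn
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),			// return r0
 *	};
 */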

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
	bpf_size;						\
})
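
/* Usage sketch, illustrative only:
 *
 *	int sz = bytes_to_bpf_size(sizeof(u32));	// sz == BPF_W
 *	int bad = bytes_to_bpf_size(3);			// bad == -EINVAL
 */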

/* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) \
	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
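
/* Usage sketch, illustrative only, assuming fp is the struct sk_filter
 * attached to a socket; for a socket filter the return value is the
 * number of bytes to keep, 0 meaning drop:
 *
 *	unsigned int res = SK_RUN_FILTER(fp, skb);
 */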

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	bool			jited;		/* Is our filter JIT'ed? */
	u32			len;		/* Number of filter blocks */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}
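
/* Illustrative note: bpf_prog_size(0) degenerates to
 * sizeof(struct bpf_prog), while for any non-trivial proglen it is
 * offsetof(struct bpf_prog, insns[proglen]), i.e. header plus the
 * flexible insn array. */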

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { }
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { }
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

void bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

int bpf_convert_filter(struct sock_filter *prog, int len,
		       struct bpf_insn *new_prog, int *new_len);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_detach_filter(struct sock *sk);

int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);

#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
	       flen, proglen, pass, image);
	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp) { }

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
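
/* Usage sketch, illustrative only: a non-negative k reads packet data
 * via skb_header_pointer(), a negative k (e.g. the SKF_NET_OFF or
 * SKF_LL_OFF ranges) takes the slow helper:
 *
 *	u16 buf;
 *	void *p = bpf_load_pointer(skb, 12, sizeof(buf), &buf);
 */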

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */