/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"
/*
 * ABI:
 *
 * On entry (*bpf_func)(*skb, *filter)
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack (grows downwards):
 * ...
 * saved reg 0 <-- r_sp
 *
 * Packet layout:
 *
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)--><----- skb->data_len ------->
 * ----------------------------------------------------
 * |                    skb->data                      |
 * ----------------------------------------------------
 */
#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))

/* JIT flags */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
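
/*
 * A sketch of the ctx->flags layout implied by the definitions above
 * (the exact register map lives in bpf_jit.h):
 *
 *   bit  16   SEEN_CALL    - the filter calls a helper function
 *   bits 17+  SEEN_SREG(x) - callee-saved register $s(x) is in use,
 *             e.g. SEEN_A = SEEN_SREG(3) because r_A lives in $s3
 *
 * save_bpf_jit_regs() below relies on this: for each set bit x in
 * ctx->flags >> SEEN_SREG_SFT it saves MIPS_R_S0 + x on the stack.
 */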
/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};
static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k - 1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
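
/*
 * Worked example for optimize_div(): k = 8 has no bits in common with
 * k - 1 = 7, so it is a power of two; k is rewritten to ilog2(8) = 3
 * and the caller emits "srl r_A, r_A, 3" instead of a divu, since
 * A / 8 == A >> 3 for the unsigned 32-bit A used by classic BPF.
 * k = 0 would also pass the test, but the classic BPF checker rejects
 * a constant divide by zero before the JIT ever runs.
 */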
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
/* Determine whether an immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}

static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}
/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
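
/*
 * Illustrative expansions of emit_load_imm() (r_tmp_imm is the scratch
 * register named in bpf_jit.h):
 *
 *   imm = 42         ->  addiu dst, $zero, 42          (1 instruction)
 *   imm = 0xdeadbeef ->  lui   r_tmp_imm, 0xdead
 *                        ori   dst, r_tmp_imm, 0xbeef  (2 instructions)
 *
 * This is why the function bumps ctx->idx once more for immediates
 * outside the signed 16-bit range.
 */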
static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
			    struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}

static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15 bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}

static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}

static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}

static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}
static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}

static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}

static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16 bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}

/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

/* Load register from the stack */
static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}
static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}

static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}

static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
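
/*
 * Note on the two helpers above: MIPS divu leaves the quotient in the
 * LO register and the remainder in HI, so emit_div() and emit_mod()
 * emit the same "divu dst, src" and differ only in whether they copy
 * the result back with mflo (dst = dst / src) or mfhi (dst = dst % src).
 */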
static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}

/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}
/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
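
/*
 * Sketch of the 64-bit expansion above for an assumed kernel address
 * imm = 0x8000000012345678: the upper 32 bits go through
 * emit_load_imm() and the lower half is merged 16 bits at a time
 * (intermediate values are sign-extended, but the final result is
 * exact):
 *
 *   lui   r_tmp_imm, 0x8000
 *   ori   r_tmp, r_tmp_imm, 0x0000
 *   dsll  r_tmp_imm, r_tmp, 16
 *   ori   r_tmp, r_tmp_imm, 0x1234
 *   dsll  r_tmp_imm, r_tmp, 16
 *   ori   reg, r_tmp_imm, 0x5678
 */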
/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I:   target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
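
/*
 * Worked example for b_imm(), with assumed numbers: say the branch is
 * emitted when ctx->idx * 4 - ctx->prologue_bytes == 40 (i.e. 40 bytes
 * into the filter body) and ctx->offsets[tgt] == 64. The returned
 * immediate is 64 - 40 - 4 = 20 bytes, i.e. five instructions past the
 * delay slot; this matches how callers elsewhere pass literal byte
 * offsets such as "3 << 2" for "three instructions ahead".
 */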
static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}

static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}
static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;

	num = (num + (align - 1)) & -align;
	return num;
}
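
/*
 * Example: align_sp(20) rounds the frame up to 24 bytes on 32-bit
 * kernels (8-byte alignment) and to 32 bytes on 64-bit kernels
 * (16-byte alignment), since (20 + 7) & ~7 == 24 and
 * (20 + 15) & ~15 == 32.
 */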
static bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* These instructions overwrite A, so it needs no zeroing */
		return true;
	default:
		return false;
	}
}
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	emit_stack_offset(-align_sp(offset), ctx);

	if (ctx->flags & SEEN_CALL) {
		/* Argument save area */
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - SZREG;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + SZREG;
		emit_store_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_store_stack_reg(MIPS_R_A1, r_sp, real_off + SZREG, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}

static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	if (ctx->flags & SEEN_CALL) {
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - SZREG;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + SZREG;
		emit_load_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_load_stack_reg(MIPS_R_A1, r_sp, real_off + SZREG, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		/*
		 * The JIT code makes calls to external functions using 2
		 * arguments. Therefore, for o32 we don't need to allocate
		 * space because we don't care if the arguments are lost
		 * across calls. We do however need to preserve incoming
		 * arguments, but the space is already allocated for us by
		 * the caller. On the other hand, for n64, we need to allocate
		 * this space ourselves. We need to preserve $ra as well.
		 */
		sp_off += config_enabled(CONFIG_64BIT) ?
			(ARGS_USED_BY_JIT + 1) * SZREG : SZREG;

	return sp_off;
}
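
/*
 * Worked example, assuming an o32 kernel (SZREG == 4) and a filter that
 * ends up with SEEN_A | SEEN_X | SEEN_MEM | SEEN_CALL: three s-register
 * bits are set (A in $s3, X in $s4, r_M in $s6), so the frame needs
 * 3 * 4 = 12 bytes for saved registers, 4 * 16 = 64 bytes of scratch
 * memory for M[], and 4 bytes for $ra, i.e. sp_off = 80. The callers
 * then round this with align_sp() before moving $sp.
 */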
static void build_prologue(struct jit_ctx *ctx)
{
	u16 first_inst = ctx->skf->insns[0].code;
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/* Do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit_jit_reg_move(r_A, r_zero, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx); /* delay slot */
}
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
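
/*
 * These helpers pack an (error, value) pair into a single u64: the
 * skb_copy_bits() return code lands in the upper 32 bits and the
 * (byte-swapped) packet data in the lower 32 bits. For example, a
 * failed load puts a negative errno such as -EFAULT (0xfffffff2) in
 * the upper word, so the generated code only has to test the upper
 * half (r_err, or a dsrl32 of $v0 on 64-bit) to detect the failure,
 * ignoring whatever is left in the low half.
 */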
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, load_order, condt;
	u32 k, b_off __maybe_unused;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);
		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;

			emit_load_imm(r_off, k, ctx);
load_common:
			/*
			 * We may have got here from the indirect loads so
			 * return if offset is negative.
			 */
			emit_slt(r_s0, r_off, r_zero, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx); /* delay slot */

			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A;

			emit_load_func(r_s0, (ptr)load_func[load_order],
				       ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			if (config_enabled(CONFIG_64BIT)) {
				/* Get error code from the top 32-bits */
				emit_dsrl32(r_s0, r_val, 0, ctx);
				/* Branch to 3 instructions ahead */
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, 3 << 2,
					   ctx);
			} else {
				/* Branch to 3 instructions ahead */
				emit_bcond(MIPS_COND_NE, r_err, r_zero, 3 << 2,
					   ctx);
			}
			emit_nop(ctx);
			/* We are good */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_jit_reg_move(r_A, r_val, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			load_order = 0;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;

			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)jit_get_skb_b, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			if (config_enabled(CONFIG_64BIT)) {
				/* Top 32-bits of $v0 on 64-bit */
				emit_dsrl32(r_s0, r_val, 0, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero,
					   3 << 2, ctx);
			} else {
				emit_bcond(MIPS_COND_NE, r_err, r_zero,
					   3 << 2, ctx);
			}
			/* No need for delay slot */
			emit_nop(ctx);
			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_val, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			break;
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_val, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_val, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X */
			ctx->flags |= SEEN_A | SEEN_X;
			/*
			 * r_X is a register, not an immediate, so use
			 * emit_or() rather than emit_ori() here.
			 */
			emit_or(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx); /* delay slot */
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit in
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		/* AUX */
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit_load(r_A, r_s0, off, ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
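		/*
		 * Example of the VLAN handling above, assuming
		 * VLAN_TAG_PRESENT is the CFI bit (0x1000) of vlan_tci:
		 * for tci = 0x3005, SKF_AD_VLAN_TAG yields
		 * 0x3005 & ~0x1000 = 0x2005 (priority + VID), while
		 * SKF_AD_VLAN_TAG_PRESENT masks out 0x1000 and the
		 * sltu turns any non-zero result into exactly 1.
		 */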
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
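
/*
 * The JIT runs build_body() twice. On the first pass ctx.target is
 * NULL, so the emit_* helpers only advance ctx.idx and record
 * per-instruction offsets; this yields the final code size and the
 * branch targets. Only then is the real buffer allocated and a second
 * pass emits the instructions for real. A rough sketch of the driver
 * below:
 *
 *	build_body(&ctx);          // dry run: sizes and ctx.offsets[]
 *	ctx.target = module_alloc(4 * ctx.idx);
 *	ctx.idx = 0;
 *	build_prologue(&ctx);      // now every uasm_i_* really stores
 *	build_body(&ctx);
 *	build_epilogue(&ctx);
 */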
int bpf_jit_enable __read_mostly;

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	/* offsets[fp->len] is written by build_body() for the epilogue */
	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	/* First pass: compute offsets and the total number of instructions */
	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Second pass: generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = true;

out:
	kfree(ctx.offsets);
}
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}