/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

#ifndef __BIG_ENDIAN
/* There are endianness assumptions herein. */
#error "Little-endian PPC not supported in BPF compiler"
#endif

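/*
 * JIT policy knob, set via the net.core.bpf_jit_enable sysctl:
 * 0 = interpreter only, 1 = JIT enabled, 2 = JIT enabled plus a debug
 * dump of every generated image.
 */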
int bpf_jit_enable __read_mostly;

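/*
 * The image is written through the data cache; order those stores and
 * invalidate the icache over the image so instruction fetch sees the
 * freshly generated code.
 */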
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

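/*
 * Emit the filter prologue: create a stack frame and save the non-volatile
 * registers this program was seen to use, preload r_D (skb->data) and r_HL
 * (length of linear skb data) when packet data is referenced, clear X if it
 * is used, and clear A unless the first instruction writes it.
 */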
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we dont leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

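/*
 * Emit the epilogue: undo whatever the prologue set up (stack frame and
 * saved non-volatile registers) and return, with the filter's result
 * already in r3.
 */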
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */
	PPC_BLR();
}

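/*
 * Pick the skb-load helper for a constant offset K at JIT time: non-negative
 * offsets use the positive-offset fast path, negative offsets at or above
 * SKF_LL_OFF use the negative-offset helper, and anything else falls back to
 * the plain helper.
 */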
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
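				/*
				 * The branch above targets (idx*4)+12, i.e.
				 * three instructions on: over the li/b
				 * "return 0" pair below, straight to the
				 * divide when X is non-zero.
				 */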
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_XOR_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

			/* None of the BPF_S_ANC* codes appear to be passed by
			 * sk_chk_filter().  The interpreter and the x86 BPF
			 * compiler implement them so we do too -- they may be
			 * planted in future.
			 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
			else
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

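/*
 * Main entry point: translate fp's BPF program into ppc64 machine code and
 * point fp->bpf_func at the result.  On any failure we simply return and
 * the filter keeps running under the interpreter.
 */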
void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
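	 * (4096 instructions at 8 bytes each already comes to 32768 bytes, the
	 * entire +/- 32K reach, and many BPF instructions expand to more than
	 * two words of ppc code.)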
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
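	/*
	 * Allocate at least a work_struct's worth: bpf_jit_free() later
	 * reuses this allocation as a work_struct so module_free() can be
	 * deferred to process context.
	 */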
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

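	/*
	 * code_base is a u32 *, so code_base + (proglen/4) is one past the
	 * last word of the image; the icache flush below covers the whole
	 * generated program.
	 */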
	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* run from softirq, we must use a work_struct to call
 * module_free() from process context
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}