diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce80bc2fdb7003078cec3f5c0d9962..55ba3855a97f58093ec06f343cde1df2dffbb17b 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
  * then branch directly to slow_path_XXX if required.  (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and blrl.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
        .globl  sk_load_word
 sk_load_word:
        cmpdi   r_addr, 0
-       blt     bpf_error
+       blt     bpf_slow_path_word_neg
+       .globl  sk_load_word_positive_offset
+sk_load_word_positive_offset:
        /* Are we accessing past headlen? */
        subi    r_scratch1, r_HL, 4
        cmpd    r_scratch1, r_addr
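
The compare above feeds a blt to bpf_slow_path_word just past the hunk
boundary, and it is the entire fast path: with the new
sk_load_word_positive_offset entry point, generated code that knows the
offset is a non-negative constant can skip the sign check, leaving only
the headlen test.  A C sketch of that test (hypothetical names):

        /* Fast-path bounds check: a 4-byte load is safe only if the
         * whole word fits in the linear header area; the byte loader
         * below needs just addr < header_len.
         */
        static int word_in_header(long addr, long header_len)
        {
                return addr <= header_len - 4;  /* else: slow path */
        }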
@@ -51,7 +50,9 @@ sk_load_word:
        .globl  sk_load_half
 sk_load_half:
        cmpdi   r_addr, 0
-       blt     bpf_error
+       blt     bpf_slow_path_half_neg
+       .globl  sk_load_half_positive_offset
+sk_load_half_positive_offset:
        subi    r_scratch1, r_HL, 2
        cmpd    r_scratch1, r_addr
        blt     bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
        .globl  sk_load_byte
 sk_load_byte:
        cmpdi   r_addr, 0
-       blt     bpf_error
+       blt     bpf_slow_path_byte_neg
+       .globl  sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
        cmpd    r_HL, r_addr
        ble     bpf_slow_path_byte
        lbzx    r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
 
 /*
  * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
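
BPF_S_LDX_B_MSH is the classic "IP header length" op: load one byte and
compute X = 4 * (byte & 0xf).  The rlwinm in the fast path below folds
the mask and the multiply into a single rotate-left-by-2 plus mask.  As
a C sketch of the semantics:

        /* Semantics of BPF_S_LDX_B_MSH: low nibble of the byte at
         * [offset], scaled by 4 -- e.g. an IPv4 IHL field turned into
         * a header length in bytes.
         */
        static unsigned int ldx_b_msh(const unsigned char *pkt,
                                      unsigned int off)
        {
                return (pkt[off] & 0x0f) << 2;
        }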
        .globl sk_load_byte_msh
 sk_load_byte_msh:
+       cmpdi   r_addr, 0
+       blt     bpf_slow_path_byte_msh_neg
+       .globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
        cmpd    r_HL, r_addr
        ble     bpf_slow_path_byte_msh
        lbzx    r_X, r_D, r_addr
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
        blr
 
-bpf_error:
-       /* Entered with cr0 = lt */
-       li      r3, 0
-       /* Generated code will 'blt epilogue', returning 0. */
-       blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
        lbz     r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
        blr
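
The positive-offset slow paths (largely elided between the hunks above)
all funnel through the kernel's skb_copy_bits(), which copies out of the
possibly non-linear skb into a stack buffer and returns a negative value
when the range is out of bounds.  A sketch, assuming <linux/skbuff.h>:

        /* Positive-offset slow path in C terms: copy size bytes into a
         * local buffer; a negative return from skb_copy_bits() becomes
         * the filter's "return 0" error path.
         */
        static int slow_load(const struct sk_buff *skb, int addr,
                             void *buf, int size)
        {
                return skb_copy_bits(skb, addr, buf, size) >= 0;
        }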
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
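
For reference, the helper lives in net/core/filter.c; in kernels of
this era its contract is approximately:

        /* Maps an offset in the SKF_NET_OFF / SKF_LL_OFF ranges to a
         * pointer inside the skb, or returns NULL if out of bounds.
         */
        void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                                   int k, unsigned int size);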
+#define sk_negative_common(SIZE)                               \
+       mflr    r0;                                             \
+       std     r0, 16(r1);                                     \
+       /* R3 goes in parameter space of caller's frame */      \
+       std     r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
+       std     r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
+       std     r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
+       stdu    r1, -BPF_PPC_SLOWPATH_FRAME(r1);                \
+       /* R3 = r_skb, as passed */                             \
+       mr      r4, r_addr;                                     \
+       li      r5, SIZE;                                       \
+       bl      bpf_internal_load_pointer_neg_helper;           \
+       /* R3 != 0 on success */                                \
+       addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
+       ld      r0, 16(r1);                                     \
+       ld      r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);           \
+       ld      r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);           \
+       mtlr    r0;                                             \
+       cmpldi  r3, 0;                                          \
+       beq     bpf_error_slow; /* cr0 = EQ */                  \
+       mr      r_addr, r3;                                     \
+       ld      r_skb, (BPF_PPC_STACKFRAME+48)(r1);             \
+       /* Great success! */
+
+bpf_slow_path_word_neg:
+       lis     r_scratch1,-32  /* SKF_LL_OFF */
+       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       blt     bpf_error       /* cr0 = LT */
+       .globl  sk_load_word_negative_offset
+sk_load_word_negative_offset:
+       sk_negative_common(4)
+       lwz     r_A, 0(r_addr)
+       blr
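
Taken together, the range check, the common macro and the final lwz
behave like the following C sketch (hypothetical function, kernel types
and <linux/filter.h> assumed; SKF_LL_OFF is -0x200000, exactly what
"lis r_scratch1,-32" materializes):

        /* End-to-end negative-offset word load: anything below
         * SKF_LL_OFF is an error; otherwise ask the helper for a
         * pointer and load through it.  On big-endian ppc64, lwz
         * already yields network byte order.
         */
        static int load_word_neg(const struct sk_buff *skb, int k, u32 *A)
        {
                void *p;

                if (k < SKF_LL_OFF)
                        return 0;       /* filter returns 0 */
                p = bpf_internal_load_pointer_neg_helper(skb, k, 4);
                if (!p)
                        return 0;
                *A = *(u32 *)p;
                return 1;
        }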
+
+bpf_slow_path_half_neg:
+       lis     r_scratch1,-32  /* SKF_LL_OFF */
+       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       blt     bpf_error       /* cr0 = LT */
+       .globl  sk_load_half_negative_offset
+sk_load_half_negative_offset:
+       sk_negative_common(2)
+       lhz     r_A, 0(r_addr)
+       blr
+
+bpf_slow_path_byte_neg:
+       lis     r_scratch1,-32  /* SKF_LL_OFF */
+       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       blt     bpf_error       /* cr0 = LT */
+       .globl  sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+       sk_negative_common(1)
+       lbz     r_A, 0(r_addr)
+       blr
+
+bpf_slow_path_byte_msh_neg:
+       lis     r_scratch1,-32  /* SKF_LL_OFF */
+       cmpd    r_addr, r_scratch1      /* addr < SKF_* */
+       blt     bpf_error       /* cr0 = LT */
+       .globl  sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+       sk_negative_common(1)
+       lbz     r_X, 0(r_addr)
+       rlwinm  r_X, r_X, 2, 32-4-2, 31-2
+       blr
+
+bpf_error_slow:
+       /* Entered with cr0 = EQ from sk_negative_common; fabricate
+        * cr0 = LT (-1 < 0) so the shared error path below works.
+        */
+       li      r_scratch1, -1
+       cmpdi   r_scratch1, 0
+bpf_error:
+       /* Entered with cr0 = lt */
+       li      r3, 0
+       /* Generated code will 'blt epilogue', returning 0. */
+       blr