@@ -451,10 +451,28 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), void *cookie
451451 }
452452}
453453
/*
 * bpf_jit_emit_func_call() - emit an absolute call to a kernel-text target
 * reachable relative to the kernel TOC.
 *
 * Materializes @func_addr into @reg as (r2 + reladdr) using an
 * addis/addi pair (high-adjusted + low 16-bit halves of the displacement),
 * then moves @reg to CTR and emits bctrl.
 *
 * @image:     JIT output buffer (consumed by EMIT via @ctx)
 * @ctx:       codegen context tracking the emit cursor
 * @func_addr: absolute address of the call target
 * @reg:       scratch GPR used to build the target address (clobbered)
 *
 * Returns 0 on success, or -ERANGE if @func_addr lies outside the signed
 * 32-bit window around kernel_toc_addr() that addis+addi can reach.
 *
 * NOTE(review): relative to the inline sequence this was factored out of,
 * the pr_err now logs @func_addr rather than the original 'func' value
 * (the pre-ppc_function_entry pointer) — confirm this message change is
 * intentional.
 */
454+ static int bpf_jit_emit_func_call (u32 * image , struct codegen_context * ctx , u64 func_addr , int reg )
455+ {
456+ long reladdr = func_addr - kernel_toc_addr ();
457+ 
/* Target must be within +/-2GB of the TOC base (r2) for addis/addi. */
458+ if (reladdr > 0x7FFFFFFF || reladdr < - (0x80000000L )) {
459+ pr_err ("eBPF: address of %ps out of range of kernel_toc.\n" , (void * )func_addr );
460+ return - ERANGE ;
461+ }
462+ 
/* reg = r2 + high-adjusted half; reg += low half; then indirect call. */
463+ EMIT (PPC_RAW_ADDIS (reg , _R2 , PPC_HA (reladdr )));
464+ EMIT (PPC_RAW_ADDI (reg , reg , PPC_LO (reladdr )));
465+ EMIT (PPC_RAW_MTCTR (reg ));
466+ EMIT (PPC_RAW_BCTRL ());
467+ 
468+ return 0 ;
469+ }
470+
454471int bpf_jit_emit_func_call_rel (u32 * image , u32 * fimage , struct codegen_context * ctx , u64 func )
455472{
456473 unsigned long func_addr = func ? ppc_function_entry ((void * )func ) : 0 ;
457- long reladdr ;
474+ long __maybe_unused reladdr ;
475+ int ret ;
458476
459477 /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
460478 if (!func ) {
@@ -507,16 +525,9 @@ int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *
507525 EMIT (PPC_RAW_BCTRL ());
508526#else
509527 if (core_kernel_text (func_addr )) {
510- reladdr = func_addr - kernel_toc_addr ();
511- if (reladdr > 0x7FFFFFFF || reladdr < - (0x80000000L )) {
512- pr_err ("eBPF: address of %ps out of range of kernel_toc.\n" , (void * )func );
513- return - ERANGE ;
514- }
515-
516- EMIT (PPC_RAW_ADDIS (_R12 , _R2 , PPC_HA (reladdr )));
517- EMIT (PPC_RAW_ADDI (_R12 , _R12 , PPC_LO (reladdr )));
518- EMIT (PPC_RAW_MTCTR (_R12 ));
519- EMIT (PPC_RAW_BCTRL ());
528+ ret = bpf_jit_emit_func_call (image , ctx , func_addr , _R12 );
529+ if (ret )
530+ return ret ;
520531 } else {
521532 if (IS_ENABLED (CONFIG_PPC64_ELF_ABI_V1 )) {
522533 /* func points to the function descriptor */
@@ -1755,6 +1766,35 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
17551766 if (ret < 0 )
17561767 return ret ;
17571768
1769+ /*
1770+ * Call to arch_bpf_timed_may_goto() is emitted by the
1771+ * verifier and called with custom calling convention with
1772+ * first argument and return value in BPF_REG_AX (_R12).
1773+ *
1774+ * The generic helper or bpf function call emission path
1775+ * may use the same scratch register as BPF_REG_AX to
1776+ * materialize the target address. This would clobber AX
1777+ * and break timed may_goto semantics.
1778+ *
1779+ * Emit a minimal indirect call sequence here using a temp
1780+ * register and skip the normal post-call return-value move.
1781+ */
1782+
1783+ if (func_addr == (u64 )arch_bpf_timed_may_goto ) {
1784+ ret = 0 ;
1785+ if (!IS_ENABLED (CONFIG_PPC_KERNEL_PCREL ))
1786+ ret = bpf_jit_emit_func_call (image , ctx , func_addr ,
1787+ tmp1_reg );
1788+
1789+ if (ret || IS_ENABLED (CONFIG_PPC_KERNEL_PCREL )) {
1790+ PPC_LI_ADDR (tmp1_reg , func_addr );
1791+ EMIT (PPC_RAW_MTCTR (tmp1_reg ));
1792+ EMIT (PPC_RAW_BCTRL ());
1793+ }
1794+
1795+ break ;
1796+ }
1797+
17581798 /* Take care of powerpc ABI requirements before kfunc call */
17591799 if (insn [i ].src_reg == BPF_PSEUDO_KFUNC_CALL ) {
17601800 if (prepare_for_kfunc_call (fp , image , ctx , & insn [i ]))
0 commit comments