Skip to content

Commit 1578202

Browse files
hbathini authored and maddy-kerneldev committed
powerpc64/bpf: fix the address returned by bpf_get_func_ip
bpf_get_func_ip() helper function returns the address of the traced function. It relies on the IP address stored at ctx - 16 by the bpf trampoline. On 64-bit powerpc, this address is recovered from LR accounting for the OOL trampoline. But the address stored here was off by 4 bytes. Ensure the address is the actual start of the traced function. Reported-by: Abhishek Dubey <adubey@linux.ibm.com> Fixes: d243b62 ("powerpc64/bpf: Add support for bpf trampolines") Cc: stable@vger.kernel.org Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/20260303181031.390073-3-hbathini@linux.ibm.com
1 parent 521bd39 commit 1578202

1 file changed

Lines changed: 19 additions & 9 deletions

File tree

arch/powerpc/net/bpf_jit_comp.c

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -785,9 +785,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
785785
* retval_off [ return value ]
786786
* [ reg argN ]
787787
* [ ... ]
788-
* regs_off [ reg_arg1 ] prog ctx context
789-
* nregs_off [ args count ]
790-
* ip_off [ traced function ]
788+
* regs_off [ reg_arg1 ] prog_ctx
789+
* nregs_off [ args count ] ((u64 *)prog_ctx)[-1]
790+
* ip_off [ traced function ] ((u64 *)prog_ctx)[-2]
791791
* [ ... ]
792792
* run_ctx_off [ bpf_tramp_run_ctx ]
793793
* [ reg argN ]
@@ -895,32 +895,42 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
895895

896896
bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);
897897

898-
/* Save our return address */
898+
/* Save our LR/return address */
899899
EMIT(PPC_RAW_MFLR(_R3));
900900
if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
901901
EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
902902
else
903903
EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
904904

905905
/*
906-
* Save ip address of the traced function.
907-
* We could recover this from LR, but we will need to address for OOL trampoline,
908-
* and optional GEP area.
906+
* Derive IP address of the traced function.
907+
* In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR points to the instruction
908+
* after the 'bl' instruction in the OOL stub. Refer to ftrace_init_ool_stub() and
909+
* bpf_arch_text_poke() for OOL stub of kernel functions and bpf programs respectively.
910+
* Relevant stub sequence:
911+
*
912+
* bl <tramp>
913+
* LR (R3) => mtlr r0
914+
* b <func_addr+4>
915+
*
916+
* Recover kernel function/bpf program address from the unconditional
917+
* branch instruction at the end of OOL stub.
909918
*/
910919
if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
911920
EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
912921
EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
913922
EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
914923
EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
915-
EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
916924
}
917925

918926
if (flags & BPF_TRAMP_F_IP_ARG)
919927
EMIT(PPC_RAW_STL(_R3, _R1, ip_off));
920928

921-
if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
929+
if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
922930
/* Fake our LR for unwind */
931+
EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
923932
EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
933+
}
924934

925935
/* Save function arg count -- see bpf_get_func_arg_cnt() */
926936
EMIT(PPC_RAW_LI(_R3, nr_regs));

0 commit comments

Comments (0)