author     Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>  2022-01-06 17:15:06 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>            2022-01-15 12:21:24 +1100
commit     fab07611fb2e6a15fac05c4583045ca5582fd826 (patch)
tree       db2568d475a92f7d0782712a85549f0fa1870edd /arch/powerpc
parent     b992f01e66150fc5e90be4a96f5eb8e634c8249e (diff)
powerpc32/bpf: Fix codegen for bpf-to-bpf calls
Pad instructions emitted for BPF_CALL so that the number of instructions
generated does not change for different function addresses. This is
especially important for calls to other bpf functions, whose address will
only be known during extra pass.

Fixes: 51c66ad849a703 ("powerpc/bpf: Implement extended BPF on PPC32")
Cc: stable@vger.kernel.org # v5.13+
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/52d8fe51f7620a6f27f377791564d79d75463576.1641468127.git.naveen.n.rao@linux.vnet.ibm.com
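For context, a minimal standalone sketch (not kernel code) of the instruction-count reasoning behind the padding. It assumes the far-call path emits four instructions (lis, ori, mtctr, bctrl); only the initial lis is visible in the hunk below, so the rest of that sequence is an assumption based on the usual PPC32 long-call pattern. With three NOPs after the relative branch, both paths occupy four 32-bit slots, so the size of the JIT image no longer depends on whether the callee address is near, far, or known only during the extra pass.

/*
 * Hypothetical, self-contained demo of the padding rationale; the helper
 * below is NOT the kernel's bpf_jit_emit_func_call_rel(), it only counts
 * how many 32-bit instructions each path would emit.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int call_insns(int64_t rel, bool padded)
{
	if (rel < 0x2000000 && rel >= -0x2000000)
		return padded ? 4 : 1;	/* bl (+ 3 nops with this fix) */
	return 4;			/* lis, ori, mtctr, bctrl (assumed) */
}

int main(void)
{
	printf("near call, before fix: %u insns\n", call_insns(0x1000, false));
	printf("near call, after fix:  %u insns\n", call_insns(0x1000, true));
	printf("far call:              %u insns\n", call_insns(0x4000000, false));
	return 0;
}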
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/net/bpf_jit_comp32.c	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index faaebd446cad..c20b49bf8f5b 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -191,6 +191,9 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
 	if (image && rel < 0x2000000 && rel >= -0x2000000) {
 		PPC_BL_ABS(func);
+		EMIT(PPC_RAW_NOP());
+		EMIT(PPC_RAW_NOP());
+		EMIT(PPC_RAW_NOP());
 	} else {
 		/* Load function address into r0 */
 		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));