author		Alexei Starovoitov <ast@kernel.org>	2022-10-25 23:11:47 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2022-10-25 23:11:47 -0700
commit		fdf457871e7c070416403efd1533ba49adc20a87 (patch)
tree		fdf22af5765c1f3c2e0c31b61f25e4e249459a20 /arch/x86/net/bpf_jit_comp.c
parent		f3c51fe02c55bd944662714e5b91b96dc271ad9f (diff)
parent		387b532138eed5b12e1afa68cafb6a389507310f (diff)
Merge branch 'bpf: Avoid unnecessary deadlock detection and failure in task storage'
Martin KaFai Lau says:
====================
From: Martin KaFai Lau <martin.lau@kernel.org>
The commit bc235cdb423a ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")
added deadlock detection to prevent a tracing program from recursing
into the bpf_task_storage_{get,delete}() helpers. These helpers acquire
a spin lock, and recursion would lead to deadlock.
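For context, the detection from bc235cdb423a is essentially a per-cpu
busy counter checked before taking the bucket spin lock; a simplified
sketch of that guard (the real logic lives in kernel/bpf/bpf_task_storage.c):

/* Sketch of the recursion guard added by bc235cdb423a, simplified.
 * A second entry on the same CPU means the helper was re-entered
 * from a tracing program; back off instead of taking the bucket
 * spin lock again and deadlocking.
 */
static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static bool task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;	/* recursion detected: fail the helper */
	}
	return true;
}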
The detection is unnecessary for bpf_lsm and bpf_iter programs, which
do not recur. The situation is the same as for the existing
bpf_pid_task_storage_{lookup,delete}_elem(), which are used in the
syscall path and also have no deadlock detection.
This set adds new bpf_task_storage_{get,delete}() helper protos without
the deadlock detection. It also removes the prog->active check from the
bpf_lsm and bpf_iter programs. Please see the individual patches for
details.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'arch/x86/net/bpf_jit_comp.c')
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d7dd8e0db8da..36ffe67ad6e5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1894,10 +1894,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
 			   int run_ctx_off, bool save_ret)
 {
-	void (*exit)(struct bpf_prog *prog, u64 start,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
-	u64 (*enter)(struct bpf_prog *prog,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1916,23 +1912,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	 */
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
-	if (p->aux->sleepable) {
-		enter = __bpf_prog_enter_sleepable;
-		exit = __bpf_prog_exit_sleepable;
-	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
-		enter = __bpf_prog_enter_struct_ops;
-		exit = __bpf_prog_exit_struct_ops;
-	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
-		enter = __bpf_prog_enter_lsm_cgroup;
-		exit = __bpf_prog_exit_lsm_cgroup;
-	}
-
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-	if (emit_call(&prog, enter, prog))
+	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
 		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1977,7 +1962,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
 	/* arg3: lea rdx, [rbp - run_ctx_off] */
 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_call(&prog, exit, prog))
+	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
 		return -EINVAL;
 
 	*pprog = prog;
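With this change the JIT no longer hard-codes the enter/exit pair; it
asks the BPF core for the right pair per program. A sketch of the
shape of the new selector (the authoritative version lives in
kernel/bpf/trampoline.c; bpf_trampoline_exit() mirrors it):

/* Sketch of the selector the JIT now calls. Programs that may
 * recurse (per bpf_prog_check_recur()) get the recursion-counted
 * enter/exit pair; bpf_lsm and bpf_iter programs skip that overhead.
 */
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
{
	bool sleepable = prog->aux->sleepable;

	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_enter_sleepable_recur :
			__bpf_prog_enter_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_STRUCT_OPS)
		return __bpf_prog_enter_struct_ops;

	if (prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_enter_lsm_cgroup;

	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
}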