author    David S. Miller <davem@davemloft.net>    2017-12-27 20:40:32 -0500
committer David S. Miller <davem@davemloft.net>    2017-12-27 20:40:32 -0500
commit    fcffe2edbd390cad499b27d20512ef000d7ecf54
tree      13120e1efcf0ad226785b721f4b38272ffdd2028 /kernel/bpf/verifier.c
parent    4f83435ad777358d9cdc138868feebbe2a23f577
parent    624588d9d6cc0a1a270a65fb4d5220f1ceddcf38
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well as
   BPF maps to bpftool xlated dump in order to allow for better BPF
   program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order to
   get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation (a sketch follows below). Also
   adjust memlock rlimit in the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static, from
   Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
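As an aside on item 7) above, a minimal sketch of the kind of program where LLVM may emit a narrow context load. This is illustrative only, not part of this diff; it assumes the 4.15-era struct bpf_cgroup_dev_ctx from linux/bpf.h, whose access_type field packs the device type into its low 16 bits.

/* hypothetical dev cgroup program: masking the low 16 bits of
 * access_type may make LLVM emit a 2-byte context load, which
 * change 7) teaches the verifier to accept
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("cgroup/dev")
int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
{
	short type = ctx->access_type & 0xFFFF;

	return type == BPF_DEVCG_DEV_CHAR;
}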
Diffstat (limited to 'kernel/bpf/verifier.c')
-rw-r--r--    kernel/bpf/verifier.c    126
1 file changed, 101 insertions(+), 25 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1cd2c2d28fc3..98d8637cf70d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -772,7 +772,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
return -EPERM;
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
- verbose(env, "funcation calls in offloaded programs are not supported yet\n");
+ verbose(env, "function calls in offloaded programs are not supported yet\n");
return -EINVAL;
}
ret = add_subprog(env, i + insn[i].imm + 1);
@@ -823,6 +823,7 @@ next:
return 0;
}
+static
struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent,
@@ -867,7 +868,7 @@ bug:
verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
verbose(env, "regno %d parent frame %d current frame %d\n",
regno, parent->curframe, state->curframe);
- return 0;
+ return NULL;
}
static int mark_reg_read(struct bpf_verifier_env *env,
@@ -1434,33 +1435,80 @@ static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
- u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
- struct bpf_verifier_state *cur = env->cur_state;
- int i;
+ u16 stack = env->subprog_stack_depth[func->subprogno];
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_stack_depth[func->subprogno] = -off;
+ return 0;
+}
- /* compute the total for current call chain */
- for (i = 0; i <= cur->curframe; i++) {
- u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
-
- /* round up to 32-bytes, since this is granularity
- * of interpreter stack sizes
- */
- depth = round_up(depth, 32);
- total += depth;
- }
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+ int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int ret_insn[MAX_CALL_FRAMES];
+ int ret_prog[MAX_CALL_FRAMES];
- if (total > MAX_BPF_STACK) {
+process_func:
+ /* round up to 32-bytes, since this is granularity
+ * of interpreter stack size
+ */
+ depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
- cur->curframe, total);
+ frame + 1, depth);
return -EACCES;
}
- return 0;
+continue_func:
+ if (env->subprog_cnt == subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[subprog];
+ for (; i < subprog_end; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ /* remember insn and function to return to */
+ ret_insn[frame] = i + 1;
+ ret_prog[frame] = subprog;
+
+ /* find the callee */
+ i = i + insn[i].imm + 1;
+ subprog = find_subprog(env, i);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i);
+ return -EFAULT;
+ }
+ subprog++;
+ frame++;
+ if (frame >= MAX_CALL_FRAMES) {
+ WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+ return -EFAULT;
+ }
+ goto process_func;
+ }
+ /* end of for() loop means the last insn of the 'subprog'
+ * was reached. Doesn't matter whether it was JA or EXIT
+ */
+ if (frame == 0)
+ return 0;
+ depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ frame--;
+ i = ret_insn[frame];
+ subprog = ret_prog[frame];
+ goto continue_func;
}
static int get_callee_stack_depth(struct bpf_verifier_env *env,
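For reference, the walk above can be condensed into a self-contained toy with hypothetical names. It collapses the ret_insn[]/ret_prog[] bookkeeping to a single-callee chain (the real verifier needs those arrays to resume scanning after each callsite, since a subprog can call several others), but keeps the same 32-byte rounding and MAX_BPF_STACK budget:

#include <stdio.h>

#define MAX_CALL_FRAMES	8
#define MAX_BPF_STACK	512

/* toy call chain: callee[p] is the single subprog p calls, -1 for a leaf */
static const int callee[]     = {  1,   2,  -1 };
static const int stack_size[] = { 64, 200, 300 };

static int round_up32(int v) { return (v + 31) & ~31; }

static int check_depth(void)
{
	int depth = 0, frame = 0, prog = 0;

process_func:
	/* charge this frame, rounded to the interpreter's 32-byte granularity */
	depth += round_up32(stack_size[prog]);
	if (depth > MAX_BPF_STACK)
		return -1;			/* combined stack too large */
	if (callee[prog] >= 0) {
		if (++frame >= MAX_CALL_FRAMES)
			return -1;		/* call chain too deep */
		prog = callee[prog];
		goto process_func;
	}
	return 0;				/* deepest chain fits */
}

int main(void)
{
	/* 64 + 224 + 320 = 608 > 512, so the walk rejects this chain */
	printf("%d\n", check_depth());
	return 0;
}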
@@ -2105,9 +2153,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_func_state *caller, *callee;
int i, subprog, target_insn;
- if (state->curframe >= MAX_CALL_FRAMES) {
+ if (state->curframe + 1 >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
- state->curframe);
+ state->curframe + 2);
return -E2BIG;
}
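A worked example of the off-by-one, assuming MAX_CALL_FRAMES is 8 as in this kernel's bpf_verifier.h: curframe is zero-based, so curframe + 1 frames already exist and the pending call would occupy index curframe + 1; the error message prints curframe + 2 because that many frames would exist had the call proceeded.

#include <stdio.h>

#define MAX_CALL_FRAMES 8	/* assumption: value from bpf_verifier.h */

int main(void)
{
	int curframe = MAX_CALL_FRAMES - 1;	/* 7: deepest valid frame index */

	/* old test let a call at curframe == 7 through, creating frame 8 */
	printf("old check rejects: %d\n", curframe >= MAX_CALL_FRAMES);
	/* new test rejects it: the callee frame would be index 8 */
	printf("new check rejects: %d\n", curframe + 1 >= MAX_CALL_FRAMES);
	return 0;
}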
@@ -4155,7 +4203,7 @@ static bool stacksafe(struct bpf_func_state *old,
if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
/* explored state didn't use this */
- return true;
+ continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
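The pruning bug in miniature, as a hedged sketch with simplified types rather than the verifier's structures: the old early return declared the whole stack equivalent at the first slot the explored state never read, even if a later slot mismatched; continue skips only that slot.

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 4

struct slot { bool live_read; int type; };

/* buggy: early return stops comparing after one unread slot */
static bool stacksafe_buggy(const struct slot *old, const struct slot *cur)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!old[i].live_read)
			return true;	/* BUG: slots i+1.. never checked */
		if (old[i].type != cur[i].type)
			return false;
	}
	return true;
}

/* fixed: skip only the unread slot, keep walking */
static bool stacksafe_fixed(const struct slot *old, const struct slot *cur)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!old[i].live_read)
			continue;	/* explored state didn't use this */
		if (old[i].type != cur[i].type)
			return false;
	}
	return true;
}

int main(void)
{
	struct slot old[NSLOTS] = { {false, 0}, {true, 1}, {true, 2}, {true, 3} };
	struct slot cur[NSLOTS] = { {false, 0}, {true, 9}, {true, 2}, {true, 3} };

	/* buggy version wrongly prunes (1); fixed version keeps exploring (0) */
	printf("buggy=%d fixed=%d\n", stacksafe_buggy(old, cur),
	       stacksafe_fixed(old, cur));
	return 0;
}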
@@ -4475,9 +4523,12 @@ static int do_check(struct bpf_verifier_env *env)
}
if (env->log.level) {
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = verbose,
+ };
+
verbose(env, "%d: ", insn_idx);
- print_bpf_insn(verbose, env, insn,
- env->allow_ptr_leaks);
+ print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
}
err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
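print_bpf_insn() now takes a callback struct instead of a bare print function. A minimal sketch of that pattern; only cb_print appears in this hunk, and every other name here is illustrative:

#include <stdarg.h>
#include <stdio.h>

/* printer callback: first argument is opaque caller state */
typedef void (*print_cb)(void *private_data, const char *fmt, ...);

struct insn_cbs {			/* stand-in for struct bpf_insn_cbs */
	print_cb cb_print;
};

static void verbose_cb(void *private_data, const char *fmt, ...)
{
	va_list args;

	(void)private_data;		/* a real verifier logs into env's buffer */
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

static void print_insn(const struct insn_cbs *cbs, void *env, int idx)
{
	cbs->cb_print(env, "%d: (toy insn)\n", idx);
}

int main(void)
{
	const struct insn_cbs cbs = { .cb_print = verbose_cb };

	print_insn(&cbs, NULL, 0);
	return 0;
}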
@@ -5065,14 +5116,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
- struct bpf_insn *insn = prog->insnsi;
+ struct bpf_insn *insn;
void *old_bpf_func;
int err = -ENOMEM;
if (env->subprog_cnt == 0)
return 0;
- for (i = 0; i < prog->len; i++, insn++) {
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
@@ -5111,7 +5162,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
goto out_free;
memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
len * sizeof(struct bpf_insn));
+ func[i]->type = prog->type;
func[i]->len = len;
+ if (bpf_prog_calc_tag(func[i]))
+ goto out_free;
func[i]->is_func = 1;
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term would need debug info to populate names
@@ -5161,6 +5215,25 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_lock_ro(func[i]);
bpf_prog_kallsyms_add(func[i]);
}
+
+ /* Last step: make now unused interpreter insns from main
+ * prog consistent for later dump requests, so they can
+ * later look the same as if they were interpreted only.
+ */
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ unsigned long addr;
+
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ insn->off = env->insn_aux_data[i].call_imm;
+ subprog = find_subprog(env, i + insn->off + 1);
+ addr = (unsigned long)func[subprog + 1]->bpf_func;
+ addr &= PAGE_MASK;
+ insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+ addr - __bpf_call_base;
+ }
+
prog->jited = 1;
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
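The imm rewrite in the loop above is plain invertible arithmetic. A small userspace sketch (all addresses and names below are made up) showing that a dump consumer can recover the page-aligned subprog image address by adding the call base back:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffULL)

/* stand-ins: in the kernel these are __bpf_call_base and a JITed image */
static const uint64_t bpf_call_base = 0xffff000010000000ULL;
static const uint64_t subprog_image = 0xffff000010042000ULL;

int main(void)
{
	/* encode, as the fixup loop above does ... */
	int32_t imm = (int32_t)((subprog_image & PAGE_MASK) - bpf_call_base);
	/* ... and decode, as a dump consumer can (sign-extend, then add) */
	uint64_t addr = bpf_call_base + (uint64_t)(int64_t)imm;

	printf("round-trip ok: %d\n", addr == (subprog_image & PAGE_MASK));
	return 0;
}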
@@ -5427,6 +5500,9 @@ skip_full_check:
sanitize_dead_code(env);
if (ret == 0)
+ ret = check_max_stack_depth(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);