Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--	tools/lib/bpf/libbpf.c	427
1 file changed, 346 insertions, 81 deletions
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index c41d9b2b59ac..69cd1a835ebd 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -54,6 +54,7 @@
 #include "str_error.h"
 #include "libbpf_internal.h"
 #include "hashmap.h"
+#include "bpf_gen_internal.h"
 
 #ifndef BPF_FS_MAGIC
 #define BPF_FS_MAGIC 0xcafe4a11
@@ -178,7 +179,7 @@ enum kern_feature_id {
 	__FEAT_CNT,
 };
 
-static bool kernel_supports(enum kern_feature_id feat_id);
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
 
 enum reloc_type {
 	RELO_LD64,
@@ -432,6 +433,8 @@ struct bpf_object {
 	bool loaded;
 	bool has_subcalls;
 
+	struct bpf_gen *gen_loader;
+
 	/*
 	 * Information when doing elf related work. Only valid if fd
 	 * is valid.
@@ -677,6 +680,11 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
+		if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
+			return -ENOTSUP;
+		}
+
 		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
 			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
 
@@ -700,13 +708,14 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 		if (err)
 			return err;
 
-		/* if function is a global/weak symbol, but has hidden
-		 * visibility (STV_HIDDEN), mark its BTF FUNC as static to
-		 * enable more permissive BPF verification mode with more
-		 * outside context available to BPF verifier
+		/* if function is a global/weak symbol, but has restricted
+		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
+		 * as static to enable more permissive BPF verification mode
+		 * with more outside context available to BPF verifier
 		 */
 		if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
-		    && GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
+		    && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
+			|| GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
 			prog->mark_btf_static = true;
 
 		nr_progs++;
@@ -1794,7 +1803,6 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 	if (!symbols)
 		return -EINVAL;
 
-
 	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
 	data = elf_sec_data(obj, scn);
 	if (!scn || !data) {
@@ -1854,6 +1862,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
+		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
+		    || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
+			return -ENOTSUP;
+		}
+
 		map->libbpf_type = LIBBPF_MAP_UNSPEC;
 		map->sec_idx = sym.st_shndx;
 		map->sec_offset = sym.st_value;
@@ -2261,6 +2275,16 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
 	pr_debug("map '%s': found inner map definition.\n", map->name);
 }
 
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+	switch (linkage) {
+	case BTF_VAR_STATIC: return "static";
+	case BTF_VAR_GLOBAL_ALLOCATED: return "global";
+	case BTF_VAR_GLOBAL_EXTERN: return "extern";
+	default: return "unknown";
+	}
+}
+
 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
 					 const struct btf_type *sec,
 					 int var_idx, int sec_idx,
@@ -2293,10 +2317,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
 			map_name, btf_kind_str(var));
 		return -EINVAL;
 	}
-	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
-	    var_extra->linkage != BTF_VAR_STATIC) {
-		pr_warn("map '%s': unsupported var linkage %u.\n",
-			map_name, var_extra->linkage);
+	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
+		pr_warn("map '%s': unsupported map linkage %s.\n",
+			map_name, btf_var_linkage_str(var_extra->linkage));
 		return -EOPNOTSUPP;
 	}
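The visibility hunk above keys off ELF symbol visibility rather than C linkage. As a sketch (not part of this patch), a BPF C program can keep a subprogram global in the ELF yet opt into the more permissive static-style verification by restricting its visibility; the attribute below is the standard GCC/clang one that yields STV_HIDDEN:

	/* stays a global symbol, but libbpf marks its BTF FUNC as static,
	 * giving the verifier full caller context for this subprog */
	__attribute__((visibility("hidden"))) int clamp_len(int len)
	{
		return len > 64 ? 64 : len;
	}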
@@ -2443,20 +2466,20 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
 
 static bool btf_needs_sanitization(struct bpf_object *obj)
 {
-	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
-	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
-	bool has_float = kernel_supports(FEAT_BTF_FLOAT);
-	bool has_func = kernel_supports(FEAT_BTF_FUNC);
+	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
 
 	return !has_func || !has_datasec || !has_func_global || !has_float;
 }
 
 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 {
-	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
-	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
-	bool has_float = kernel_supports(FEAT_BTF_FLOAT);
-	bool has_func = kernel_supports(FEAT_BTF_FUNC);
+	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
 	struct btf_type *t;
 	int i, j, vlen;
@@ -2637,7 +2660,7 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
 	int err;
 
 	/* btf_vmlinux could be loaded earlier */
-	if (obj->btf_vmlinux)
+	if (obj->btf_vmlinux || obj->gen_loader)
 		return 0;
 
 	if (!force && !obj_needs_vmlinux_btf(obj))
@@ -2662,7 +2685,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 	if (!obj->btf)
 		return 0;
 
-	if (!kernel_supports(FEAT_BTF)) {
+	if (!kernel_supports(obj, FEAT_BTF)) {
 		if (kernel_needs_btf(obj)) {
 			err = -EOPNOTSUPP;
 			goto report;
@@ -2719,7 +2742,20 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 		bpf_object__sanitize_btf(obj, kern_btf);
 	}
 
-	err = btf__load(kern_btf);
+	if (obj->gen_loader) {
+		__u32 raw_size = 0;
+		const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
+
+		if (!raw_data)
+			return -ENOMEM;
+		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
+		/* Pretend to have valid FD to pass various fd >= 0 checks.
+		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+		 */
+		btf__set_fd(kern_btf, 0);
+	} else {
+		err = btf__load(kern_btf);
+	}
 	if (sanitize) {
 		if (!err) {
 			/* move fd to libbpf's BTF */
@@ -4293,11 +4329,17 @@ static struct kern_feature_desc {
 	},
 };
 
-static bool kernel_supports(enum kern_feature_id feat_id)
+static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
 {
 	struct kern_feature_desc *feat = &feature_probes[feat_id];
 	int ret;
 
+	if (obj->gen_loader)
+		/* To generate loader program assume the latest kernel
+		 * to avoid doing extra prog_load, map_create syscalls.
+		 */
+		return true;
+
 	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
 		ret = feat->probe();
 		if (ret > 0) {
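Threading the object through kernel_supports() is what lets the gen_loader case bypass feature probing: the generated loader runs against the kernel it is eventually loaded into, not the build host, so probing the host would cost extra prog_load/map_create syscalls and could give the wrong answer anyway. An illustrative call shape (not patch code; the sanitizer name is hypothetical):

	/* during loader generation every feature reads as available,
	 * so downgrade paths like this one become no-ops */
	if (!kernel_supports(obj, FEAT_BTF_FLOAT))
		sanitize_float_types(obj);	/* hypothetical, skipped under gen_loader */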
+ */ + return true; + if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { ret = feat->probe(); if (ret > 0) { @@ -4380,6 +4422,13 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) char *cp, errmsg[STRERR_BUFSIZE]; int err, zero = 0; + if (obj->gen_loader) { + bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, + map->mmaped, map->def.value_size); + if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) + bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); + return 0; + } err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); if (err) { err = -errno; @@ -4405,14 +4454,14 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) static void bpf_map__destroy(struct bpf_map *map); -static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) +static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { struct bpf_create_map_attr create_attr; struct bpf_map_def *def = &map->def; memset(&create_attr, 0, sizeof(create_attr)); - if (kernel_supports(FEAT_PROG_NAME)) + if (kernel_supports(obj, FEAT_PROG_NAME)) create_attr.name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_type = def->type; @@ -4453,7 +4502,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) if (map->inner_map) { int err; - err = bpf_object__create_map(obj, map->inner_map); + err = bpf_object__create_map(obj, map->inner_map, true); if (err) { pr_warn("map '%s': failed to create inner map: %d\n", map->name, err); @@ -4465,7 +4514,15 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.inner_map_fd = map->inner_map_fd; } - map->fd = bpf_create_map_xattr(&create_attr); + if (obj->gen_loader) { + bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps); + /* Pretend to have valid FD to pass various fd >= 0 checks. + * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. 
+ */ + map->fd = 0; + } else { + map->fd = bpf_create_map_xattr(&create_attr); + } if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { char *cp, errmsg[STRERR_BUFSIZE]; @@ -4486,6 +4543,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) return -errno; if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { + if (obj->gen_loader) + map->inner_map->fd = -1; bpf_map__destroy(map->inner_map); zfree(&map->inner_map); } @@ -4493,11 +4552,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) return 0; } -static int init_map_slots(struct bpf_map *map) +static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) { const struct bpf_map *targ_map; unsigned int i; - int fd, err; + int fd, err = 0; for (i = 0; i < map->init_slots_sz; i++) { if (!map->init_slots[i]) @@ -4505,7 +4564,13 @@ static int init_map_slots(struct bpf_map *map) targ_map = map->init_slots[i]; fd = bpf_map__fd(targ_map); - err = bpf_map_update_elem(map->fd, &i, &fd, 0); + if (obj->gen_loader) { + pr_warn("// TODO map_update_elem: idx %ld key %d value==map_idx %ld\n", + map - obj->maps, i, targ_map - obj->maps); + return -ENOTSUP; + } else { + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + } if (err) { err = -errno; pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", @@ -4547,7 +4612,7 @@ bpf_object__create_maps(struct bpf_object *obj) pr_debug("map '%s': skipping creation (preset fd=%d)\n", map->name, map->fd); } else { - err = bpf_object__create_map(obj, map); + err = bpf_object__create_map(obj, map, false); if (err) goto err_out; @@ -4563,7 +4628,7 @@ bpf_object__create_maps(struct bpf_object *obj) } if (map->init_slots_sz) { - err = init_map_slots(map); + err = init_map_slots(obj, map); if (err < 0) { zclose(map->fd); goto err_out; @@ -4973,11 +5038,14 @@ static int load_module_btfs(struct bpf_object *obj) if (obj->btf_modules_loaded) return 0; + if (obj->gen_loader) + return 0; + /* don't do this again, even if we find no module BTFs */ obj->btf_modules_loaded = true; /* kernel too old to support module BTFs */ - if (!kernel_supports(FEAT_MODULE_BTF)) + if (!kernel_supports(obj, FEAT_MODULE_BTF)) return 0; while (true) { @@ -6120,6 +6188,12 @@ static int bpf_core_apply_relo(struct bpf_program *prog, if (str_is_empty(spec_str)) return -EINVAL; + if (prog->obj->gen_loader) { + pr_warn("// TODO core_relo: prog %ld insn[%d] %s %s kind %d\n", + prog - prog->obj->programs, relo->insn_off / 8, + local_name, spec_str, relo->kind); + return -ENOTSUP; + } err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); if (err) { pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", @@ -6371,19 +6445,34 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) switch (relo->type) { case RELO_LD64: - insn[0].src_reg = BPF_PSEUDO_MAP_FD; - insn[0].imm = obj->maps[relo->map_idx].fd; + if (obj->gen_loader) { + insn[0].src_reg = BPF_PSEUDO_MAP_IDX; + insn[0].imm = relo->map_idx; + } else { + insn[0].src_reg = BPF_PSEUDO_MAP_FD; + insn[0].imm = obj->maps[relo->map_idx].fd; + } break; case RELO_DATA: - insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[1].imm = insn[0].imm + relo->sym_off; - insn[0].imm = obj->maps[relo->map_idx].fd; + if (obj->gen_loader) { + insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; + insn[0].imm = relo->map_idx; + } else { + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[relo->map_idx].fd; + } break; case 
@@ -6402,11 +6491,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 			insn[0].imm = ext->ksym.kernel_btf_id;
 			break;
 		case RELO_SUBPROG_ADDR:
-			insn[0].src_reg = BPF_PSEUDO_FUNC;
-			/* will be handled as a follow up pass */
+			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
+				pr_warn("prog '%s': relo #%d: bad insn\n",
+					prog->name, i);
+				return -EINVAL;
+			}
+			/* handled already */
 			break;
 		case RELO_CALL:
-			/* will be handled as a follow up pass */
+			/* handled already */
 			break;
 		default:
 			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
@@ -6497,7 +6590,7 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
 	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
 	 * supprot func/line info
 	 */
-	if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
+	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
 		return 0;
 
 	/* only attempt func info relocation if main program's func_info
@@ -6575,6 +6668,30 @@ static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, si
 		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
 }
 
+static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
+	struct reloc_desc *relos;
+	int i;
+
+	if (main_prog == subprog)
+		return 0;
+	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+	if (!relos)
+		return -ENOMEM;
+	memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+	       sizeof(*relos) * subprog->nr_reloc);
+
+	for (i = main_prog->nr_reloc; i < new_cnt; i++)
+		relos[i].insn_idx += subprog->sub_insn_off;
+	/* After insn_idx adjustment the 'relos' array is still sorted
+	 * by insn_idx and doesn't break bsearch.
+	 */
+	main_prog->reloc_desc = relos;
+	main_prog->nr_reloc = new_cnt;
+	return 0;
+}
+
 static int
 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 		       struct bpf_program *prog)
@@ -6595,6 +6712,11 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 			continue;
 
 		relo = find_prog_insn_relo(prog, insn_idx);
+		if (relo && relo->type == RELO_EXTERN_FUNC)
+			/* kfunc relocations will be handled later
+			 * in bpf_object__relocate_data()
+			 */
+			continue;
 		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
 			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
 				prog->name, insn_idx, relo->type);
@@ -6669,6 +6791,10 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 		pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
 			 main_prog->name, subprog->insns_cnt, subprog->name);
 
+		/* The subprog insns are now appended. Append its relos too. */
+		err = append_subprog_relos(main_prog, subprog);
+		if (err)
+			return err;
 		err = bpf_object__reloc_code(obj, main_prog, subprog);
 		if (err)
 			return err;
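append_subprog_relos() keeps one invariant worth spelling out: copied descriptors must be rebased onto the main program's instruction index space. A worked example with assumed numbers:

	/* subprog appended at main-program insn 100 (sub_insn_off == 100);
	 * a relocation at the subprog's local insn 7 must now describe
	 * insn 107 of the combined program */
	relos[i].insn_idx += subprog->sub_insn_off;	/* 7 + 100 -> 107 */

Because every copied index grows by the same constant, the array stays sorted by insn_idx and the bsearch in find_prog_insn_relo() keeps working, as the comment in the function notes.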
@@ -6798,11 +6924,25 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
 	return 0;
 }
 
+static void
+bpf_object__free_relocs(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	int i;
+
+	/* free up relocation descriptors */
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		zfree(&prog->reloc_desc);
+		prog->nr_reloc = 0;
+	}
+}
+
 static int
 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
 {
 	struct bpf_program *prog;
-	size_t i;
+	size_t i, j;
 	int err;
 
 	if (obj->btf_ext) {
@@ -6813,23 +6953,32 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
 			return err;
 		}
 	}
-	/* relocate data references first for all programs and sub-programs,
-	 * as they don't change relative to code locations, so subsequent
-	 * subprogram processing won't need to re-calculate any of them
+
+	/* Before relocating calls pre-process relocations and mark
+	 * few ld_imm64 instructions that points to subprogs.
+	 * Otherwise bpf_object__reloc_code() later would have to consider
+	 * all ld_imm64 insns as relocation candidates. That would
+	 * reduce relocation speed, since amount of find_prog_insn_relo()
+	 * would increase and most of them will fail to find a relo.
 	 */
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
-		err = bpf_object__relocate_data(obj, prog);
-		if (err) {
-			pr_warn("prog '%s': failed to relocate data references: %d\n",
-				prog->name, err);
-			return err;
+		for (j = 0; j < prog->nr_reloc; j++) {
+			struct reloc_desc *relo = &prog->reloc_desc[j];
+			struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+
+			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
+			if (relo->type == RELO_SUBPROG_ADDR)
+				insn[0].src_reg = BPF_PSEUDO_FUNC;
 		}
 	}
-	/* now relocate subprogram calls and append used subprograms to main
+
+	/* relocate subprogram calls and append used subprograms to main
 	 * programs; each copy of subprogram code needs to be relocated
 	 * differently for each main program, because its code location might
-	 * have changed
+	 * have changed.
+	 * Append subprog relos to main programs to allow data relos to be
+	 * processed after text is completely relocated.
 	 */
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
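The pre-pass above exists so that later passes can recognize subprog-address loads by inspecting the instruction alone. libbpf's insn_is_pseudo_func() amounts to roughly the following (paraphrased, not the verbatim helper):

	static bool insn_is_pseudo_func(const struct bpf_insn *insn)
	{
		/* an ld_imm64 (BPF_LD | BPF_IMM | BPF_DW) whose src_reg marks
		 * the imm as a subprog offset rather than a plain constant */
		return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
		       insn->src_reg == BPF_PSEUDO_FUNC;
	}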
@@ -6846,12 +6995,20 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
 			return err;
 		}
 	}
-	/* free up relocation descriptors */
+	/* Process data relos for main programs */
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
-		zfree(&prog->reloc_desc);
-		prog->nr_reloc = 0;
+		if (prog_is_subprog(obj, prog))
+			continue;
+		err = bpf_object__relocate_data(obj, prog);
+		if (err) {
+			pr_warn("prog '%s': failed to relocate data references: %d\n",
+				prog->name, err);
+			return err;
+		}
 	}
+	if (!obj->gen_loader)
+		bpf_object__free_relocs(obj);
 	return 0;
 }
@@ -7040,6 +7197,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
 	enum bpf_func_id func_id;
 	int i;
 
+	if (obj->gen_loader)
+		return 0;
+
 	for (i = 0; i < prog->insns_cnt; i++, insn++) {
 		if (!insn_is_helper_call(insn, &func_id))
 			continue;
@@ -7051,12 +7211,12 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
 		switch (func_id) {
 		case BPF_FUNC_probe_read_kernel:
 		case BPF_FUNC_probe_read_user:
-			if (!kernel_supports(FEAT_PROBE_READ_KERN))
+			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
 				insn->imm = BPF_FUNC_probe_read;
 			break;
 		case BPF_FUNC_probe_read_kernel_str:
 		case BPF_FUNC_probe_read_user_str:
-			if (!kernel_supports(FEAT_PROBE_READ_KERN))
+			if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
 				insn->imm = BPF_FUNC_probe_read_str;
 			break;
 		default:
@@ -7091,12 +7251,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.prog_type = prog->type;
 	/* old kernels might not support specifying expected_attach_type */
-	if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
+	if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
 	    prog->sec_def->is_exp_attach_type_optional)
 		load_attr.expected_attach_type = 0;
 	else
 		load_attr.expected_attach_type = prog->expected_attach_type;
-	if (kernel_supports(FEAT_PROG_NAME))
+	if (kernel_supports(prog->obj, FEAT_PROG_NAME))
 		load_attr.name = prog->name;
 	load_attr.insns = insns;
 	load_attr.insn_cnt = insns_cnt;
@@ -7112,7 +7272,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	/* specify func_info/line_info only if kernel supports them */
 	btf_fd = bpf_object__btf_fd(prog->obj);
-	if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
+	if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
 		load_attr.prog_btf_fd = btf_fd;
 		load_attr.func_info = prog->func_info;
 		load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -7124,6 +7284,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
 
+	if (prog->obj->gen_loader) {
+		bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
+				   prog - prog->obj->programs);
+		*pfd = -1;
+		return 0;
+	}
 retry_load:
 	if (log_buf_size) {
 		log_buf = malloc(log_buf_size);
@@ -7142,7 +7308,7 @@ retry_load:
 		pr_debug("verifier log:\n%s", log_buf);
 
 		if (prog->obj->rodata_map_idx >= 0 &&
-		    kernel_supports(FEAT_PROG_BIND_MAP)) {
+		    kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
 			struct bpf_map *rodata_map =
 				&prog->obj->maps[prog->obj->rodata_map_idx];
@@ -7201,6 +7367,38 @@ out:
 	return ret;
 }
 
+static int bpf_program__record_externs(struct bpf_program *prog)
+{
+	struct bpf_object *obj = prog->obj;
+	int i;
+
+	for (i = 0; i < prog->nr_reloc; i++) {
+		struct reloc_desc *relo = &prog->reloc_desc[i];
+		struct extern_desc *ext = &obj->externs[relo->sym_off];
+
+		switch (relo->type) {
+		case RELO_EXTERN_VAR:
+			if (ext->type != EXT_KSYM)
+				continue;
+			if (!ext->ksym.type_id) {
+				pr_warn("typeless ksym %s is not supported yet\n",
+					ext->name);
+				return -ENOTSUP;
+			}
+			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
+					       relo->insn_idx);
+			break;
+		case RELO_EXTERN_FUNC:
+			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
+					       relo->insn_idx);
+			break;
+		default:
+			continue;
+		}
+	}
+	return 0;
+}
+
 static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
 				     int *btf_type_id);
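bpf_program__record_externs() walks the same reloc_desc array the relocation passes use, so it sees exactly the externs the program references. In BPF C those externs look like the following illustrative declarations (names borrowed from kernel selftests; __ksym is the usual bpf_helpers.h section attribute):

	extern const struct rq runqueues __ksym;		/* RELO_EXTERN_VAR, typed ksym   */
	extern void bpf_testmod_test_mod_kfunc(int i) __ksym;	/* RELO_EXTERN_FUNC (kfunc call) */

Typeless ksyms, i.e. those without a BTF type id, are rejected for now, matching the pr_warn above.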
@@ -7246,6 +7444,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 		pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
 			prog->name, prog->instances.nr);
 	}
+	if (prog->obj->gen_loader)
+		bpf_program__record_externs(prog);
 	err = load_program(prog, prog->insns, prog->insns_cnt,
 			   license, kern_ver, &fd);
 	if (!err)
@@ -7322,6 +7522,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 		if (err)
 			return err;
 	}
+	if (obj->gen_loader)
+		bpf_object__free_relocs(obj);
 	return 0;
 }
@@ -7500,11 +7702,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
 	bpf_object__for_each_map(m, obj) {
 		if (!bpf_map__is_internal(m))
 			continue;
-		if (!kernel_supports(FEAT_GLOBAL_DATA)) {
+		if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
 			pr_warn("kernel doesn't support global data\n");
 			return -ENOTSUP;
 		}
-		if (!kernel_supports(FEAT_ARRAY_MMAP))
+		if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
 			m->def.map_flags ^= BPF_F_MMAPABLE;
 	}
@@ -7702,6 +7904,12 @@ static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
 		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
 			continue;
 
+		if (obj->gen_loader) {
+			ext->is_set = true;
+			ext->ksym.kernel_btf_obj_fd = 0;
+			ext->ksym.kernel_btf_id = 0;
+			continue;
+		}
 		t = btf__type_by_id(obj->btf, ext->btf_id);
 		if (btf_is_var(t))
 			err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
@@ -7816,6 +8024,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 		return -EINVAL;
 	}
 
+	if (obj->gen_loader)
+		bpf_gen__init(obj->gen_loader, attr->log_level);
+
 	err = bpf_object__probe_loading(obj);
 	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@@ -7826,6 +8037,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
 	err = err ? : bpf_object__load_progs(obj, attr->log_level);
 
+	if (obj->gen_loader) {
+		/* reset FDs */
+		btf__set_fd(obj->btf, -1);
+		for (i = 0; i < obj->nr_maps; i++)
+			obj->maps[i].fd = -1;
+		if (!err)
+			err = bpf_gen__finish(obj->gen_loader);
+	}
+
 	/* clean up module BTFs */
 	for (i = 0; i < obj->btf_module_cnt; i++) {
 		close(obj->btf_modules[i].fd);
@@ -8451,6 +8671,7 @@ void bpf_object__close(struct bpf_object *obj)
 	if (obj->clear_priv)
 		obj->clear_priv(obj, obj->priv);
 
+	bpf_gen__free(obj->gen_loader);
 	bpf_object__elf_finish(obj);
 	bpf_object__unload(obj);
 	btf__free(obj->btf);
@@ -8541,6 +8762,22 @@ void *bpf_object__priv(const struct bpf_object *obj)
 	return obj ? obj->priv : ERR_PTR(-EINVAL);
 }
 
+int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
+{
+	struct bpf_gen *gen;
+
+	if (!opts)
+		return -EFAULT;
+	if (!OPTS_VALID(opts, gen_loader_opts))
+		return -EINVAL;
+	gen = calloc(sizeof(*gen), 1);
+	if (!gen)
+		return -ENOMEM;
+	gen->opts = opts;
+	obj->gen_loader = gen;
+	return 0;
+}
+
 static struct bpf_program *
 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
 		    bool forward)
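bpf_object__gen_loader() is the entry point a tool such as bpftool uses to flip an object into generation mode. A usage sketch, under the assumption that struct gen_loader_opts exposes the generated blobs as data/data_sz and insns/insns_sz (as the companion headers in this series define them):

	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	err = bpf_object__gen_loader(obj, &gen);	/* enable generation mode  */
	if (!err)
		err = bpf_object__load(obj);	/* records syscalls, touches no kernel state */
	/* gen.data/gen.data_sz and gen.insns/gen.insns_sz now hold the
	 * loader's map/BTF blobs and instructions, ready to be embedded
	 * into a light skeleton */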
@@ -8887,6 +9124,8 @@ static const struct bpf_sec_def section_defs[] = {
 		.expected_attach_type = BPF_TRACE_ITER,
 		.is_attach_btf = true,
 		.attach_fn = attach_iter),
+	SEC_DEF("syscall", SYSCALL,
+		.is_sleepable = true),
 	BPF_EAPROG_SEC("xdp_devmap/",		BPF_PROG_TYPE_XDP,
 						BPF_XDP_DEVMAP),
 	BPF_EAPROG_SEC("xdp_cpumap/",		BPF_PROG_TYPE_XDP,
@@ -9176,6 +9415,28 @@ invalid_prog:
 #define BTF_ITER_PREFIX "bpf_iter_"
 #define BTF_MAX_NAME_SIZE 128
 
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+				const char **prefix, int *kind)
+{
+	switch (attach_type) {
+	case BPF_TRACE_RAW_TP:
+		*prefix = BTF_TRACE_PREFIX;
+		*kind = BTF_KIND_TYPEDEF;
+		break;
+	case BPF_LSM_MAC:
+		*prefix = BTF_LSM_PREFIX;
+		*kind = BTF_KIND_FUNC;
+		break;
+	case BPF_TRACE_ITER:
+		*prefix = BTF_ITER_PREFIX;
+		*kind = BTF_KIND_FUNC;
+		break;
+	default:
+		*prefix = "";
+		*kind = BTF_KIND_FUNC;
+	}
+}
+
 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
 				   const char *name, __u32 kind)
 {
@@ -9196,21 +9457,11 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
 static inline int find_attach_btf_id(struct btf *btf, const char *name,
 				     enum bpf_attach_type attach_type)
 {
-	int err;
+	const char *prefix;
+	int kind;
 
-	if (attach_type == BPF_TRACE_RAW_TP)
-		err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
-					      BTF_KIND_TYPEDEF);
-	else if (attach_type == BPF_LSM_MAC)
-		err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
-					      BTF_KIND_FUNC);
-	else if (attach_type == BPF_TRACE_ITER)
-		err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
-					      BTF_KIND_FUNC);
-	else
-		err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
-
-	return err;
+	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
+	return find_btf_by_prefix_kind(btf, prefix, name, kind);
 }
 
 int libbpf_find_vmlinux_btf_id(const char *name,
@@ -9309,7 +9560,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
 	__u32 attach_prog_fd = prog->attach_prog_fd;
 	const char *name = prog->sec_name, *attach_name;
 	const struct bpf_sec_def *sec = NULL;
-	int i, err;
+	int i, err = 0;
 
 	if (!name)
 		return -EINVAL;
@@ -9344,7 +9595,13 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
 	}
 
 	/* kernel/module BTF ID */
-	err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+	if (prog->obj->gen_loader) {
+		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
+		*btf_obj_fd = 0;
+		*btf_type_id = 1;
+	} else {
+		err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+	}
 	if (err) {
 		pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
 		return err;
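The new "syscall" section definition above corresponds to the sleepable BPF_PROG_TYPE_SYSCALL program type introduced by this kernel series; such programs are executed via BPF_PROG_TEST_RUN rather than attached to a hook, and the generated loader is itself a program of this type. A hand-written one might look like the following sketch (bpf_sys_bpf() is the helper added elsewhere in this series; the program name is made up):

	SEC("syscall")
	int create_array(void *ctx)
	{
		union bpf_attr attr = {};

		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 8;
		attr.max_entries = 1;
		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	}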
@@ -9501,6 +9758,14 @@ int bpf_map__set_initial_value(struct bpf_map *map,
 	return 0;
 }
 
+const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+{
+	if (!map->mmaped)
+		return NULL;
+	*psize = map->def.value_size;
+	return map->mmaped;
+}
+
 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
 {
 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
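bpf_map__initial_value() gives skeleton generators read access to an internal map's pre-load image, the counterpart of the existing bpf_map__set_initial_value(). A sketch of the intended use (the destination buffer is hypothetical):

	size_t sz;
	const void *data = bpf_map__initial_value(map, &sz);

	if (data)	/* only internal, mmap-backed maps have an image */
		memcpy(rodata_blob, data, sz);	/* embed into the generated skeleton */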