 include/linux/btf_ids.h                                  |   1 +
 kernel/bpf/Makefile                                      |   1 +
 kernel/bpf/helpers.c                                     |   1 +
 kernel/bpf/kmem_cache_iter.c                             | 175 ++++++++++++++
 kernel/bpf/verifier.c                                    |   5 +
 mm/slab_common.c                                         |  19 ++
 tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c | 115 +++++++++
 tools/testing/selftests/bpf/progs/bpf_iter.h             |   7 +
 tools/testing/selftests/bpf/progs/kmem_cache_iter.c      |  87 +++++++
 9 files changed, 411 insertions(+), 0 deletions(-)
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index c0e3e1426a82..139bdececdcf 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[];
 extern u32 bpf_cgroup_btf_id[];
 extern u32 bpf_local_storage_map_btf_id[];
 extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
 
 #endif
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 9b9c151b5c82..105328f0b9c0 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -52,3 +52,4 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/
 obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
 obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
+obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 4053f279ed4c..aaaefefdf692 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -3090,6 +3090,7 @@ BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_get_kmem_cache)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
new file mode 100644
index 000000000000..ebc101d7da51
--- /dev/null
+++ b/kernel/bpf/kmem_cache_iter.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Google */
+#include <linux/bpf.h>
+#include <linux/btf_ids.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+
+#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */
+
+struct bpf_iter__kmem_cache {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct kmem_cache *, s);
+};
+
+static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	loff_t cnt = 0;
+	bool found = false;
+	struct kmem_cache *s;
+
+	mutex_lock(&slab_mutex);
+
+	/* Find an entry at the given position in the slab_caches list instead
+	 * of keeping a reference (of the last visited entry, if any) out of
+	 * slab_mutex. It might miss something if one is deleted in the middle
+	 * while it releases the lock. But it should be rare and there's not
+	 * much we can do about it.
+	 */
+	list_for_each_entry(s, &slab_caches, list) {
+		if (cnt == *pos) {
+			/* Make sure this entry remains in the list by getting
+			 * a new reference count. Note that boot_cache entries
+			 * have a negative refcount, so don't touch them.
+			 */
+			if (s->refcount > 0)
+				s->refcount++;
+			found = true;
+			break;
+		}
+		cnt++;
+	}
+	mutex_unlock(&slab_mutex);
+
+	if (!found)
+		return NULL;
+
+	return s;
+}
+
+static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_iter__kmem_cache ctx = {
+		.meta = &meta,
+		.s = v,
+	};
+	struct bpf_prog *prog;
+	bool destroy = false;
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, true);
+	if (prog && !ctx.s)
+		bpf_iter_run_prog(prog, &ctx);
+
+	if (ctx.s == NULL)
+		return;
+
+	mutex_lock(&slab_mutex);
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (ctx.s->refcount > 1)
+		ctx.s->refcount--;
+	else if (ctx.s->refcount == 1)
+		destroy = true;
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(ctx.s);
+}
+
+static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct kmem_cache *s = v;
+	struct kmem_cache *next = NULL;
+	bool destroy = false;
+
+	++*pos;
+
+	mutex_lock(&slab_mutex);
+
+	if (list_last_entry(&slab_caches, struct kmem_cache, list) != s) {
+		next = list_next_entry(s, list);
+
+		WARN_ON_ONCE(next->refcount == 0);
+
+		/* boot_caches have negative refcount, don't touch them */
+		if (next->refcount > 0)
+			next->refcount++;
+	}
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (s->refcount > 1)
+		s->refcount--;
+	else if (s->refcount == 1)
+		destroy = true;
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(s);
+
+	return next;
+}
+
+static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_iter__kmem_cache ctx = {
+		.meta = &meta,
+		.s = v,
+	};
+	struct bpf_prog *prog;
+	int ret = 0;
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, false);
+	if (prog)
+		ret = bpf_iter_run_prog(prog, &ctx);
+
+	return ret;
+}
+
+static const struct seq_operations kmem_cache_iter_seq_ops = {
+	.start = kmem_cache_iter_seq_start,
+	.next = kmem_cache_iter_seq_next,
+	.stop = kmem_cache_iter_seq_stop,
+	.show = kmem_cache_iter_seq_show,
+};
+
+BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)
+
+static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
+	.seq_ops = &kmem_cache_iter_seq_ops,
+};
+
+static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
+					    struct seq_file *seq)
+{
+	seq_puts(seq, "kmem_cache iter\n");
+}
+
+DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
+		     struct kmem_cache *s)
+
+static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
+	.target			= "kmem_cache",
+	.feature		= BPF_ITER_RESCHED,
+	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
+	.ctx_arg_info_size	= 1,
+	.ctx_arg_info		= {
+		{ offsetof(struct bpf_iter__kmem_cache, s),
+		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
+	},
+	.seq_info		= &kmem_cache_iter_seq_info,
+};
+
+static int __init bpf_kmem_cache_iter_init(void)
+{
+	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
+	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
+}
+
+late_initcall(bpf_kmem_cache_iter_init);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cfc62e0776bf..f514247ba8ba 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11259,6 +11259,7 @@ enum special_kfunc_type {
 	KF_bpf_preempt_enable,
 	KF_bpf_iter_css_task_new,
 	KF_bpf_session_cookie,
+	KF_bpf_get_kmem_cache,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -11324,6 +11325,7 @@ BTF_ID(func, bpf_session_cookie)
 #else
 BTF_ID_UNUSED
 #endif
+BTF_ID(func, bpf_get_kmem_cache)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -12834,6 +12836,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
 			regs[BPF_REG_0].btf_id = ptr_type_id;
 
+			if (meta.func_id == special_kfunc_list[KF_bpf_get_kmem_cache])
+				regs[BPF_REG_0].type |= PTR_UNTRUSTED;
+
 			if (is_iter_next_kfunc(&meta)) {
 				struct bpf_reg_state *cur_iter;
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 744324465615..f97a95289562 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1322,6 +1322,25 @@ size_t ksize(const void *objp)
 }
 EXPORT_SYMBOL(ksize);
 
+#ifdef CONFIG_BPF_SYSCALL
+#include <linux/btf.h>
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc struct kmem_cache *bpf_get_kmem_cache(u64 addr)
+{
+	struct slab *slab;
+
+	if (!virt_addr_valid((void *)(long)addr))
+		return NULL;
+
+	slab = virt_to_slab((void *)(long)addr);
+	return slab ? slab->slab_cache : NULL;
+}
+
+__bpf_kfunc_end_defs();
+#endif /* CONFIG_BPF_SYSCALL */
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
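For illustration, here is a minimal sketch of how a BPF program might call the new kfunc on its own, outside the iterator. The fentry attach point, program name, and printout are assumptions for this example, not part of the patch. Because the verifier change above marks the returned pointer PTR_UNTRUSTED, field loads through it are converted to probe-reads, so a NULL check is the only requirement:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;

	SEC("fentry/kmem_cache_free")
	int BPF_PROG(trace_free, struct kmem_cache *cachep, void *objp)
	{
		/* Look up the cache backing an arbitrary kernel address */
		struct kmem_cache *s = bpf_get_kmem_cache((u64)objp);

		/* s is PTR_UNTRUSTED: the load of s->size below becomes a
		 * probe-read, so no further verification is needed.
		 */
		if (s)
			bpf_printk("freeing object from a cache of size %u", s->size);
		return 0;
	}
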
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
new file mode 100644
index 000000000000..848d8fc9171f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "kmem_cache_iter.skel.h"
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+	char name[SLAB_NAME_MAX];
+	long obj_size;
+};
+
+static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.flags = 0,  /* Run it with the current task */
+	);
+	int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
+
+	/* Get task_struct and check if it's from a slab cache */
+	ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");
+
+	/* The BPF program should set the 'found' variable */
+	ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
+}
+
+static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
+{
+	FILE *fp;
+	int map_fd;
+	char name[SLAB_NAME_MAX];
+	unsigned long objsize;
+	char rest_of_line[1000];
+	struct kmem_cache_result r;
+	int seen = 0;
+
+	fp = fopen("/proc/slabinfo", "r");
+	if (fp == NULL) {
+		/* CONFIG_SLUB_DEBUG is not enabled */
+		return;
+	}
+
+	map_fd = bpf_map__fd(skel->maps.slab_result);
+
+	/* Ignore the first two header lines */
+	fscanf(fp, "slabinfo - version: %*d.%*d\n");
+	fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);
+
+	/* Compare name and objsize only - others can change frequently */
+	while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
+		      name, &objsize, rest_of_line) == 3) {
+		int ret = bpf_map_lookup_elem(map_fd, &seen, &r);
+
+		if (!ASSERT_OK(ret, "kmem_cache_lookup"))
+			break;
+
+		ASSERT_STREQ(r.name, name, "kmem_cache_name");
+		ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
+
+		seen++;
+	}
+
+	ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");
+
+	fclose(fp);
+}
+
+void test_kmem_cache_iter(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	struct kmem_cache_iter *skel = NULL;
+	union bpf_iter_link_info linfo = {};
+	struct bpf_link *link;
+	char buf[256];
+	int iter_fd;
+
+	skel = kmem_cache_iter__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
+		return;
+
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		goto destroy;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+		goto free_link;
+
+	memset(buf, 0, sizeof(buf));
+	while (read(iter_fd, buf, sizeof(buf)) > 0) {
+		/* Read out all contents */
+		printf("%s", buf);
+	}
+
+	/* Subsequent reads should return 0 */
+	ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+	if (test__start_subtest("check_task_struct"))
+		subtest_kmem_cache_iter_check_task_struct(skel);
+	if (test__start_subtest("check_slabinfo"))
+		subtest_kmem_cache_iter_check_slabinfo(skel);
+
+	close(iter_fd);
+
+free_link:
+	bpf_link__destroy(link);
+destroy:
+	kmem_cache_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
index c41ee80533ca..3305dc3a74b3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter.h
@@ -24,6 +24,7 @@
 #define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
 #define BTF_F_ZERO BTF_F_ZERO___not_used
 #define bpf_iter__ksym bpf_iter__ksym___not_used
+#define bpf_iter__kmem_cache bpf_iter__kmem_cache___not_used
 #include "vmlinux.h"
 #undef bpf_iter_meta
 #undef bpf_iter__bpf_map
@@ -48,6 +49,7 @@
 #undef BTF_F_PTR_RAW
 #undef BTF_F_ZERO
 #undef bpf_iter__ksym
+#undef bpf_iter__kmem_cache
 
 struct bpf_iter_meta {
 	struct seq_file *seq;
@@ -165,3 +167,8 @@ struct bpf_iter__ksym {
 	struct bpf_iter_meta *meta;
 	struct kallsym_iter *ksym;
 };
+
+struct bpf_iter__kmem_cache {
+	struct bpf_iter_meta *meta;
+	struct kmem_cache *s;
+} __attribute__((preserve_access_index));
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
new file mode 100644
index 000000000000..72c9dafecd98
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+	char name[SLAB_NAME_MAX];
+	long obj_size;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(key_size, sizeof(void *));
+	__uint(value_size, SLAB_NAME_MAX);
+	__uint(max_entries, 1);
+} slab_hash SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(struct kmem_cache_result));
+	__uint(max_entries, 1024);
+} slab_result SEC(".maps");
+
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
+
+/* Results, will be checked by userspace */
+int task_struct_found;
+int kmem_cache_seen;
+
+SEC("iter/kmem_cache")
+int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct kmem_cache *s = ctx->s;
+	struct kmem_cache_result *r;
+	int idx;
+
+	if (s) {
+		/* Print name and size to check that the slab iterator
+		 * implements the seq interface properly; it's also useful
+		 * for debugging.
+		 */
+		BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);
+
+		idx = kmem_cache_seen;
+		r = bpf_map_lookup_elem(&slab_result, &idx);
+		if (r == NULL)
+			return 0;
+
+		kmem_cache_seen++;
+
+		/* Save name and size to match /proc/slabinfo */
+		bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name);
+		r->obj_size = s->size;
+
+		if (!bpf_strncmp(r->name, 11, "task_struct"))
+			bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST);
+	}
+
+	return 0;
+}
+
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(check_task_struct)
+{
+	u64 curr = bpf_get_current_task();
+	struct kmem_cache *s;
+	char *name;
+
+	s = bpf_get_kmem_cache(curr);
+	if (s == NULL) {
+		task_struct_found = -1;
+		return 0;
+	}
+	name = bpf_map_lookup_elem(&slab_hash, &s);
+	if (name && !bpf_strncmp(name, 11, "task_struct"))
+		task_struct_found = 1;
+	else
+		task_struct_found = -2;
+	return 0;
+}
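To round this out, here is a condensed sketch of driving the iterator from userspace, mirroring the selftest above. Error handling is elided, and the skeleton header name assumes the selftest build; this is not code from the patch:

	#include <stdio.h>
	#include <unistd.h>
	#include <bpf/libbpf.h>
	#include "kmem_cache_iter.skel.h"

	int main(void)
	{
		struct kmem_cache_iter *skel = kmem_cache_iter__open_and_load();
		struct bpf_link *link;
		char buf[4096];
		ssize_t n;
		int fd;

		/* Attach the iter program and materialize a readable iterator fd */
		link = bpf_program__attach_iter(skel->progs.slab_info_collector, NULL);
		fd = bpf_iter_create(bpf_link__fd(link));

		/* Stream the BPF_SEQ_PRINTF output until the slab_caches walk ends */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);

		close(fd);
		bpf_link__destroy(link);
		kmem_cache_iter__destroy(skel);
		return 0;
	}

With the program above, each cache comes out as one "name: size" line (for example "kmalloc-64: 64"). Alternatively, the iterator link can be pinned with bpftool iter pin and read back with plain cat.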