Diffstat (limited to 'tools/testing/selftests/bpf')
81 files changed, 5174 insertions, 467 deletions
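The Makefile change below redirects every bpf_prog_load() and bpf_load_program() call in the tests to test-only wrappers (via the -Dbpf_prog_load=bpf_prog_test_load and -Dbpf_load_program=bpf_test_load_program defines) and links each test with test_stub.o, so all programs are loaded through one common path that can inject testing flags such as BPF_F_TEST_RND_HI32. The contents of test_stub.c are not part of this diff; the following is only a minimal sketch of what such a wrapper could look like, assuming the libbpf API used elsewhere in this series (bpf_prog_load_attr / bpf_prog_load_xattr):

	// illustrative sketch; not the actual test_stub.c from the tree
	#include <string.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
			       struct bpf_object **pobj, int *prog_fd)
	{
		struct bpf_prog_load_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.file = file;
		attr.prog_type = type;
		/* ask the verifier to randomize the upper 32 bits of
		 * registers at load time, to shake out 32-bit subregister
		 * bugs -- the flag the scale tests below also set
		 */
		attr.prog_flags = BPF_F_TEST_RND_HI32;
		return bpf_prog_load_xattr(&attr, pobj, prog_fd);
	}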
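Several program diffs below (sockmap_parse_prog.c, sockmap_tcp_msg_prog.c, sockmap_verdict_prog.c) drop their private copies of the bpf_printk macro, which this series consolidates into bpf_helpers.h. The macro copies the format string into a stack array first, since BPF programs of this era could not pass string literals from global data to bpf_trace_printk. A hypothetical caller, just to show the intended use (output appears in /sys/kernel/debug/tracing/trace_pipe; bpf_trace_printk takes at most three format arguments):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("sk_skb_log")	/* hypothetical program */
	int log_len(struct __sk_buff *skb)
	{
		bpf_printk("skb len=%u\n", skb->len);
		return skb->len;
	}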
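Likewise, several map updates below (bpf_flow.c, netcnt_prog.c, socket_cookie_prog.c) convert legacy struct bpf_map_def SEC("maps") definitions into BTF-defined maps in the ".maps" section: key and value are described by pointer-typed members whose types land in the object's BTF, which is also what lets the BPF_ANNOTATE_KV_PAIR() annotations be dropped from netcnt_prog.c. A minimal sketch of the two styles, using a hypothetical map name:

	/* legacy declaration: only byte sizes, no type information */
	struct bpf_map_def SEC("maps") counters_legacy = {
		.type = BPF_MAP_TYPE_ARRAY,
		.key_size = sizeof(__u32),
		.value_size = sizeof(__u64),
		.max_entries = 1,
	};

	/* BTF-defined declaration: *key and *value are never dereferenced;
	 * they only record the key/value types in BTF for the loader
	 */
	struct {
		__u32 type;
		__u32 max_entries;
		__u32 *key;
		__u64 *value;
	} counters SEC(".maps") = {
		.type = BPF_MAP_TYPE_ARRAY,
		.max_entries = 1,
	};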
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index dd5d69529382..7470327edcfe 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -22,6 +22,7 @@ test_lirc_mode2_user get_cgroup_id_user test_skb_cgroup_id_user test_socket_cookie +test_cgroup_attach test_cgroup_storage test_select_reuseport test_flow_dissector @@ -35,3 +36,6 @@ test_sysctl alu32 libbpf.pc libbpf.so.* +test_hashmap +test_btf_dump +xdping diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e36356e2377e..fb5ce43e28b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -15,7 +15,9 @@ LLC ?= llc LLVM_OBJCOPY ?= llvm-objcopy LLVM_READELF ?= llvm-readelf BTF_PAHOLE ?= pahole -CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include \ + -Dbpf_prog_load=bpf_prog_test_load \ + -Dbpf_load_program=bpf_test_load_program LDLIBS += -lcap -lelf -lrt -lpthread # Order correspond to 'make run_tests' order @@ -23,7 +25,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ test_cgroup_storage test_select_reuseport test_section_names \ - test_netcnt test_tcpnotify_user test_sock_fields test_sysctl + test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ + test_btf_dump test_cgroup_attach xdping BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) TEST_GEN_FILES = $(BPF_OBJ_FILES) @@ -54,7 +57,8 @@ TEST_PROGS := test_kmod.sh \ test_lwt_ip_encap.sh \ test_tcp_check_syncookie.sh \ test_tc_tunnel.sh \ - test_tc_edt.sh + test_tc_edt.sh \ + test_xdping.sh TEST_PROGS_EXTENDED := with_addr.sh \ with_tunnels.sh \ @@ -79,9 +83,9 @@ $(OUTPUT)/test_maps: map_tests/*.c BPFOBJ := $(OUTPUT)/libbpf.a -$(TEST_GEN_PROGS): $(BPFOBJ) +$(TEST_GEN_PROGS): test_stub.o $(BPFOBJ) -$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a +$(TEST_GEN_PROGS_EXTENDED): test_stub.o $(OUTPUT)/libbpf.a $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c @@ -97,6 +101,7 @@ $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c $(OUTPUT)/test_netcnt: cgroup_helpers.c $(OUTPUT)/test_sock_fields: cgroup_helpers.c $(OUTPUT)/test_sysctl: cgroup_helpers.c +$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c .PHONY: force @@ -177,7 +182,7 @@ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\ $(ALU32_BUILD_DIR)/urandom_read $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \ -o $(ALU32_BUILD_DIR)/test_progs_32 \ - test_progs.c trace_helpers.c prog_tests/*.c \ + test_progs.c test_stub.c trace_helpers.c prog_tests/*.c \ $(OUTPUT)/libbpf.a $(LDLIBS) $(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H) @@ -275,4 +280,5 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES) ) > $(VERIFIER_TESTS_H)) EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \ - $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) + $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \ + feature diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h index b25595ea4a78..05f036df8a4c 100644 --- a/tools/testing/selftests/bpf/bpf_endian.h +++ b/tools/testing/selftests/bpf/bpf_endian.h @@ -2,6 +2,7 @@ #ifndef 
__BPF_ENDIAN__ #define __BPF_ENDIAN__ +#include <linux/stddef.h> #include <linux/swab.h> /* LLVM's BPF target selects the endianness of the CPU diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 5f6f9e7aba2a..1a5b1accf091 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -8,6 +8,14 @@ */ #define SEC(NAME) __attribute__((section(NAME), used)) +/* helper macro to print out debug messages */ +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + /* helper functions called from eBPF programs written in C */ static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) BPF_FUNC_map_lookup_elem; @@ -23,7 +31,7 @@ static int (*bpf_map_pop_elem)(void *map, void *value) = (void *) BPF_FUNC_map_pop_elem; static int (*bpf_map_peek_elem)(void *map, void *value) = (void *) BPF_FUNC_map_peek_elem; -static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) = +static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) = (void *) BPF_FUNC_probe_read; static unsigned long long (*bpf_ktime_get_ns)(void) = (void *) BPF_FUNC_ktime_get_ns; @@ -54,7 +62,7 @@ static int (*bpf_perf_event_output)(void *ctx, void *map, (void *) BPF_FUNC_perf_event_output; static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = (void *) BPF_FUNC_get_stackid; -static int (*bpf_probe_write_user)(void *dst, void *src, int size) = +static int (*bpf_probe_write_user)(void *dst, const void *src, int size) = (void *) BPF_FUNC_probe_write_user; static int (*bpf_current_task_under_cgroup)(void *map, int index) = (void *) BPF_FUNC_current_task_under_cgroup; @@ -216,6 +224,7 @@ static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk, (void *) BPF_FUNC_sk_storage_get; static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = (void *)BPF_FUNC_sk_storage_delete; +static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index a29206ebbd13..ec219f84e041 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -6,44 +6,17 @@ #include <stdlib.h> #include <string.h> #include <errno.h> +#include <libbpf.h> /* libbpf_num_possible_cpus */ static inline unsigned int bpf_num_possible_cpus(void) { - static const char *fcpu = "/sys/devices/system/cpu/possible"; - unsigned int start, end, possible_cpus = 0; - char buff[128]; - FILE *fp; - int len, n, i, j = 0; + int possible_cpus = libbpf_num_possible_cpus(); - fp = fopen(fcpu, "r"); - if (!fp) { - printf("Failed to open %s: '%s'!\n", fcpu, strerror(errno)); + if (possible_cpus < 0) { + printf("Failed to get # of possible cpus: '%s'!\n", + strerror(-possible_cpus)); exit(1); } - - if (!fgets(buff, sizeof(buff), fp)) { - printf("Failed to read %s!\n", fcpu); - exit(1); - } - - len = strlen(buff); - for (i = 0; i <= len; i++) { - if (buff[i] == ',' || buff[i] == '\0') { - buff[i] = '\0'; - n = sscanf(&buff[j], "%u-%u", &start, &end); - if (n <= 0) { - printf("Failed to retrieve # possible CPUs!\n"); - exit(1); - } else if (n == 1) { - end = start; - } - possible_cpus += end - start + 1; - j = i + 1; - } - } - - fclose(fp); - return possible_cpus; } diff --git 
a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 6692a40a6979..e95c33e333a4 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -34,6 +34,60 @@ CGROUP_WORK_DIR, path) /** + * enable_all_controllers() - Enable all available cgroup v2 controllers + * + * Enable all available cgroup v2 controllers in order to increase + * the code coverage. + * + * If successful, 0 is returned. + */ +int enable_all_controllers(char *cgroup_path) +{ + char path[PATH_MAX + 1]; + char buf[PATH_MAX]; + char *c, *c2; + int fd, cfd; + ssize_t len; + + snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path); + fd = open(path, O_RDONLY); + if (fd < 0) { + log_err("Opening cgroup.controllers: %s", path); + return 1; + } + + len = read(fd, buf, sizeof(buf) - 1); + if (len < 0) { + close(fd); + log_err("Reading cgroup.controllers: %s", path); + return 1; + } + buf[len] = 0; + close(fd); + + /* No controllers available? We're probably on cgroup v1. */ + if (len == 0) + return 0; + + snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path); + cfd = open(path, O_RDWR); + if (cfd < 0) { + log_err("Opening cgroup.subtree_control: %s", path); + return 1; + } + + for (c = strtok_r(buf, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) { + if (dprintf(cfd, "+%s\n", c) <= 0) { + log_err("Enabling controller %s: %s", c, path); + close(cfd); + return 1; + } + } + close(cfd); + return 0; +} + +/** * setup_cgroup_environment() - Setup the cgroup environment * * After calling this function, cleanup_cgroup_environment should be called @@ -71,6 +125,9 @@ int setup_cgroup_environment(void) return 1; } + if (enable_all_controllers(cgroup_workdir)) + return 1; + return 0; } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index b74e2f6e96d0..e1b55261526f 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -5,14 +5,14 @@ static int libbpf_debug_print(enum libbpf_print_level level, const char *format, va_list args) { if (level != LIBBPF_DEBUG) - return 0; + return vfprintf(stderr, format, args); if (!strstr(format, "verifier log")) return 0; return vfprintf(stderr, "%s", args); } -static int check_load(const char *file) +static int check_load(const char *file, enum bpf_prog_type type) { struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; @@ -20,8 +20,9 @@ static int check_load(const char *file) memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); attr.file = file; - attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; + attr.prog_type = type; attr.log_level = 4; + attr.prog_flags = BPF_F_TEST_RND_HI32; err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); bpf_object__close(obj); if (err) @@ -31,19 +32,69 @@ static int check_load(const char *file) void test_bpf_verif_scale(void) { - const char *file1 = "./test_verif_scale1.o"; - const char *file2 = "./test_verif_scale2.o"; - const char *file3 = "./test_verif_scale3.o"; - int err; + const char *sched_cls[] = { + "./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o", + }; + const char *raw_tp[] = { + /* full unroll by llvm */ + "./pyperf50.o", "./pyperf100.o", "./pyperf180.o", + + /* partial unroll. llvm will unroll loop ~150 times. + * C loop count -> 600. + * Asm loop count -> 4. + * 16k insns in loop body. + * Total of 5 such loops. Total program size ~82k insns. 
+ */ + "./pyperf600.o", + + /* no unroll at all. + * C loop count -> 600. + * ASM loop count -> 600. + * ~110 insns in loop body. + * Total of 5 such loops. Total program size ~1500 insns. + */ + "./pyperf600_nounroll.o", + + "./loop1.o", "./loop2.o", + + /* partial unroll. 19k insn in a loop. + * Total program size 20.8k insn. + * ~350k processed_insns + */ + "./strobemeta.o", + + /* no unroll, tiny loops */ + "./strobemeta_nounroll1.o", + "./strobemeta_nounroll2.o", + }; + const char *cg_sysctl[] = { + "./test_sysctl_loop1.o", "./test_sysctl_loop2.o", + }; + int err, i; if (verifier_stats) libbpf_set_print(libbpf_debug_print); - err = check_load(file1); - err |= check_load(file2); - err |= check_load(file3); - if (!err) - printf("test_verif_scale:OK\n"); - else - printf("test_verif_scale:FAIL\n"); + err = check_load("./loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT); + printf("test_scale:loop3:%s\n", err ? (error_cnt--, "OK") : "FAIL"); + + for (i = 0; i < ARRAY_SIZE(sched_cls); i++) { + err = check_load(sched_cls[i], BPF_PROG_TYPE_SCHED_CLS); + printf("test_scale:%s:%s\n", sched_cls[i], err ? "FAIL" : "OK"); + } + + for (i = 0; i < ARRAY_SIZE(raw_tp); i++) { + err = check_load(raw_tp[i], BPF_PROG_TYPE_RAW_TRACEPOINT); + printf("test_scale:%s:%s\n", raw_tp[i], err ? "FAIL" : "OK"); + } + + for (i = 0; i < ARRAY_SIZE(cg_sysctl); i++) { + err = check_load(cg_sysctl[i], BPF_PROG_TYPE_CGROUP_SYSCTL); + printf("test_scale:%s:%s\n", cg_sysctl[i], err ? "FAIL" : "OK"); + } + err = check_load("./test_xdp_loop.o", BPF_PROG_TYPE_XDP); + printf("test_scale:test_xdp_loop:%s\n", err ? "FAIL" : "OK"); + + err = check_load("./test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL); + printf("test_scale:test_seg6_loop:%s\n", err ? "FAIL" : "OK"); } diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c new file mode 100644 index 000000000000..67cea1686305 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static volatile int sigusr1_received = 0; + +static void sigusr1_handler(int signum) +{ + sigusr1_received++; +} + +static int test_send_signal_common(struct perf_event_attr *attr, + int prog_type, + const char *test_name) +{ + int err = -1, pmu_fd, prog_fd, info_map_fd, status_map_fd; + const char *file = "./test_send_signal_kern.o"; + struct bpf_object *obj = NULL; + int pipe_c2p[2], pipe_p2c[2]; + __u32 key = 0, duration = 0; + char buf[256]; + pid_t pid; + __u64 val; + + if (CHECK(pipe(pipe_c2p), test_name, + "pipe pipe_c2p error: %s\n", strerror(errno))) + goto no_fork_done; + + if (CHECK(pipe(pipe_p2c), test_name, + "pipe pipe_p2c error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + goto no_fork_done; + } + + pid = fork(); + if (CHECK(pid < 0, test_name, "fork error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + close(pipe_p2c[0]); + close(pipe_p2c[1]); + goto no_fork_done; + } + + if (pid == 0) { + /* install signal handler and notify parent */ + signal(SIGUSR1, sigusr1_handler); + + close(pipe_c2p[0]); /* close read */ + close(pipe_p2c[1]); /* close write */ + + /* notify parent signal handler is installed */ + write(pipe_c2p[1], buf, 1); + + /* make sure parent enabled bpf program to send_signal */ + read(pipe_p2c[0], buf, 1); + + /* wait a little for signal handler */ + sleep(1); + + if (sigusr1_received) + write(pipe_c2p[1], "2", 1); + else + write(pipe_c2p[1], "0", 1); + + /* wait for 
parent notification and exit */ + read(pipe_p2c[0], buf, 1); + + close(pipe_c2p[1]); + close(pipe_p2c[0]); + exit(0); + } + + close(pipe_c2p[1]); /* close write */ + close(pipe_p2c[0]); /* close read */ + + err = bpf_prog_load(file, prog_type, &obj, &prog_fd); + if (CHECK(err < 0, test_name, "bpf_prog_load error: %s\n", + strerror(errno))) + goto prog_load_failure; + + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, + -1 /* group id */, 0 /* flags */); + if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n", + strerror(errno))) { + err = -1; + goto close_prog; + } + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_enable error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_set_bpf error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = -1; + info_map_fd = bpf_object__find_map_fd_by_name(obj, "info_map"); + if (CHECK(info_map_fd < 0, test_name, "find map %s error\n", "info_map")) + goto disable_pmu; + + status_map_fd = bpf_object__find_map_fd_by_name(obj, "status_map"); + if (CHECK(status_map_fd < 0, test_name, "find map %s error\n", "status_map")) + goto disable_pmu; + + /* wait until child signal handler installed */ + read(pipe_c2p[0], buf, 1); + + /* trigger the bpf send_signal */ + key = 0; + val = (((__u64)(SIGUSR1)) << 32) | pid; + bpf_map_update_elem(info_map_fd, &key, &val, 0); + + /* notify child that bpf program can send_signal now */ + write(pipe_p2c[1], buf, 1); + + /* wait for result */ + err = read(pipe_c2p[0], buf, 1); + if (CHECK(err < 0, test_name, "reading pipe error: %s\n", strerror(errno))) + goto disable_pmu; + if (CHECK(err == 0, test_name, "reading pipe error: size 0\n")) { + err = -1; + goto disable_pmu; + } + + err = CHECK(buf[0] != '2', test_name, "incorrect result\n"); + + /* notify child safe to exit */ + write(pipe_p2c[1], buf, 1); + +disable_pmu: + close(pmu_fd); +close_prog: + bpf_object__close(obj); +prog_load_failure: + close(pipe_c2p[0]); + close(pipe_p2c[1]); + wait(NULL); +no_fork_done: + return err; +} + +static int test_send_signal_tracepoint(void) +{ + const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; + struct perf_event_attr attr = { + .type = PERF_TYPE_TRACEPOINT, + .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN, + .sample_period = 1, + .wakeup_events = 1, + }; + __u32 duration = 0; + int bytes, efd; + char buf[256]; + + efd = open(id_path, O_RDONLY, 0); + if (CHECK(efd < 0, "tracepoint", + "open syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return -1; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", + "read syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return -1; + + attr.config = strtol(buf, NULL, 0); + + return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); +} + +static int test_send_signal_nmi(void) +{ + struct perf_event_attr attr = { + .sample_freq = 50, + .freq = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + + return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, "perf_event"); +} + +void test_send_signal(void) +{ + int ret = 0; + + ret |= test_send_signal_tracepoint(); + ret |= test_send_signal_nmi(); + if (!ret) + printf("test_send_signal:OK\n"); + else + printf("test_send_signal:FAIL\n"); +} diff --git 
a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 81ad9a0b29d0..849f42e548b5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -57,17 +57,25 @@ struct frag_hdr { __be32 identification; }; -struct bpf_map_def SEC("maps") jmp_table = { +struct { + __u32 type; + __u32 max_entries; + __u32 key_size; + __u32 value_size; +} jmp_table SEC(".maps") = { .type = BPF_MAP_TYPE_PROG_ARRAY, + .max_entries = 8, .key_size = sizeof(__u32), .value_size = sizeof(__u32), - .max_entries = 8 }; -struct bpf_map_def SEC("maps") last_dissection = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct bpf_flow_keys *value; +} last_dissection SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct bpf_flow_keys), .max_entries = 1, }; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c new file mode 100644 index 000000000000..8f44767a75fa --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for bitfield. + * + * Copyright (c) 2019 Facebook + */ +#include <stdbool.h> + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfields_only_mixed_types { + * int a: 3; + * long int b: 2; + * _Bool c: 1; + * enum { + * A = 0, + * B = 1, + * } d: 1; + * short e: 5; + * int: 20; + * unsigned int f: 30; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct bitfields_only_mixed_types { + int a: 3; + long int b: 2; + bool c: 1; /* it's really a _Bool type */ + enum { + A, /* A = 0, dumper is very explicit */ + B, /* B = 1, same */ + } d: 1; + short e: 5; + /* 20-bit padding here */ + unsigned f: 30; /* this gets aligned on 4-byte boundary */ +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfield_mixed_with_others { + * char: 4; + * int a: 4; + * short b; + * long int c; + * long int d: 8; + * int e; + * int f; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ +struct bitfield_mixed_with_others { + long: 4; /* char is enough as a backing field */ + int a: 4; + /* 8-bit implicit padding */ + short b; /* combined with previous bitfield */ + /* 4 more bytes of implicit padding */ + long c; + long d: 8; + /* 24 bits implicit padding */ + int e; /* combined with previous bitfield */ + int f; + /* 4 bytes of padding */ +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct bitfield_flushed { + * int a: 4; + * long: 60; + * long int b: 16; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ +struct bitfield_flushed { + int a: 4; + long: 0; /* flush until next natural alignment boundary */ + long b: 16; +}; + +int f(struct { + struct bitfields_only_mixed_types _1; + struct bitfield_mixed_with_others _2; + struct bitfield_flushed _3; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c new file mode 100644 index 000000000000..ba97165bdb28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for multi-dimensional array output. 
+ * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +typedef int arr_t[2]; + +typedef int multiarr_t[3][4][5]; + +typedef int *ptr_arr_t[6]; + +typedef int *ptr_multiarr_t[7][8][9][10]; + +typedef int * (*fn_ptr_arr_t[11])(); + +typedef int * (*fn_ptr_multiarr_t[12][13])(); + +struct root_struct { + arr_t _1; + multiarr_t _2; + ptr_arr_t _3; + ptr_multiarr_t _4; + fn_ptr_arr_t _5; + fn_ptr_multiarr_t _6; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *s) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c new file mode 100644 index 000000000000..92a4ad428710 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test validating no name versioning happens between + * independent C namespaces (struct/union/enum vs typedef/enum values). + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct S { + int S; + int U; +}; + +typedef struct S S; + +union U { + int S; + int U; +}; + +typedef union U U; + +enum E { + V = 0, +}; + +typedef enum E E; + +struct A {}; + +union B {}; + +enum C { + A = 1, + B = 2, + C = 3, +}; + +struct X {}; + +union Y {}; + +enum Z; + +typedef int X; + +typedef int Y; + +typedef int Z; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct { + struct S _1; + S _2; + union U _3; + U _4; + enum E _5; + E _6; + struct A a; + union B b; + enum C c; + struct X x; + union Y y; + enum Z *z; + X xx; + Y yy; + Z zz; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c new file mode 100644 index 000000000000..7c95702ee4cb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for topological sorting of dependent structs. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct s1 {}; + +struct s3; + +struct s4; + +struct s2 { + struct s2 *s2; + struct s3 *s3; + struct s4 *s4; +}; + +struct s3 { + struct s1 s1; + struct s2 s2; +}; + +struct s4 { + struct s1 s1; + struct s3 s3; +}; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct root_struct { + struct s4 s4; + struct list_head l; + struct hlist_node n; + struct hlist_head h; + struct callback_head cb; +}; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *root) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c new file mode 100644 index 000000000000..1cef3bec1dc7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for struct packing determination. 
+ * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct packed_trailing_space { + int a; + short b; +} __attribute__((packed)); + +struct non_packed_trailing_space { + int a; + short b; +}; + +struct packed_fields { + short a; + int b; +} __attribute__((packed)); + +struct non_packed_fields { + short a; + int b; +}; + +struct nested_packed { + char: 4; + int a: 4; + long int b; + struct { + char c; + int d; + } __attribute__((packed)) e; +} __attribute__((packed)); + +union union_is_never_packed { + int a: 4; + char b; + char c: 1; +}; + +union union_does_not_need_packing { + struct { + long int a; + int b; + } __attribute__((packed)); + int c; +}; + +union jump_code_union { + char code[5]; + struct { + char jump; + int offset; + } __attribute__((packed)); +}; + +/*------ END-EXPECTED-OUTPUT ------ */ + +int f(struct { + struct packed_trailing_space _1; + struct non_packed_trailing_space _2; + struct packed_fields _3; + struct non_packed_fields _4; + struct nested_packed _5; + union union_is_never_packed _6; + union union_does_not_need_packing _7; + union jump_code_union _8; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c new file mode 100644 index 000000000000..3a62119c7498 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper tests for implicit and explicit padding between fields and + * at the end of a struct. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +struct padded_implicitly { + int a; + long int b; + char c; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_explicitly { + * int a; + * int: 32; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_explicitly { + int a; + int: 1; /* algo will explicitly pad with full 32 bits here */ + int b; +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_a_lot { + * int a; + * long: 32; + * long: 64; + * long: 64; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_a_lot { + int a; + /* 32 bit of implicit padding here, which algo will make explicit */ + long: 64; + long: 64; + int b; +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct padded_cache_line { + * int a; + * long: 32; + * long: 64; + * long: 64; + * long: 64; + * int b; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct padded_cache_line { + int a; + int b __attribute__((aligned(32))); +}; + +/* ----- START-EXPECTED-OUTPUT ----- */ +/* + *struct zone_padding { + * char x[0]; + *}; + * + *struct zone { + * int a; + * short b; + * short: 16; + * struct zone_padding __pad__; + *}; + * + */ +/* ------ END-EXPECTED-OUTPUT ------ */ + +struct zone_padding { + char x[0]; +} __attribute__((__aligned__(8))); + +struct zone { + int a; + short b; + short: 16; + struct zone_padding __pad__; +}; + +int f(struct { + struct padded_implicitly _1; + struct padded_explicitly _2; + struct padded_a_lot _3; + struct padded_cache_line _4; + struct zone _5; +} *_) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c new file mode 100644 index 000000000000..d4a02fe44a12 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C dumper test for majority of C syntax quirks. + * + * Copyright (c) 2019 Facebook + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +enum e1 { + A = 0, + B = 1, +}; + +enum e2 { + C = 100, + D = -100, + E = 0, +}; + +typedef enum e2 e2_t; + +typedef enum { + F = 0, + G = 1, + H = 2, +} e3_t; + +typedef int int_t; + +typedef volatile const int * volatile const crazy_ptr_t; + +typedef int *****we_need_to_go_deeper_ptr_t; + +typedef volatile const we_need_to_go_deeper_ptr_t * restrict * volatile * const * restrict volatile * restrict const * volatile const * restrict volatile const how_about_this_ptr_t; + +typedef int *ptr_arr_t[10]; + +typedef void (*fn_ptr1_t)(int); + +typedef void (*printf_fn_t)(const char *, ...); + +/* ------ END-EXPECTED-OUTPUT ------ */ +/* + * While previous function pointers are pretty trivial (C-syntax-level + * trivial), the following are deciphered here for future generations: + * + * - `fn_ptr2_t`: function, taking anonymous struct as a first arg and pointer + * to a function, that takes int and returns int, as a second arg; returning + * a pointer to a const pointer to a char. Equivalent to: + * typedef struct { int a; } s_t; + * typedef int (*fn_t)(int); + * typedef char * const * (*fn_ptr2_t)(s_t, fn_t); + * + * - `fn_complex_t`: pointer to a function returning struct and accepting + * union and struct. All structs and enums are anonymous and defined inline. + * + * - `signal_t`: pointer to a function accepting a pointer to a function as an + * argument and returning pointer to a function as a result. Sane equivalent: + * typedef void (*signal_handler_t)(int); + * typedef signal_handler_t (*signal_ptr_t)(int, signal_handler_t); + * + * - `fn_ptr_arr1_t`: array of pointers to a function accepting pointer to + * a pointer to an int and returning pointer to a char. Easy. + * + * - `fn_ptr_arr2_t`: array of const pointers to a function taking no arguments + * and returning a const pointer to a function, that takes pointer to a + * `int -> char *` function and returns pointer to a char.
Equivalent: + * typedef char * (*fn_input_t)(int); + * typedef char * (*fn_output_outer_t)(fn_input_t); + * typedef const fn_output_outer_t (* fn_output_inner_t)(); + * typedef const fn_output_inner_t fn_ptr_arr2_t[5]; + */ +/* ----- START-EXPECTED-OUTPUT ----- */ +typedef char * const * (*fn_ptr2_t)(struct { + int a; +}, int (*)(int)); + +typedef struct { + int a; + void (*b)(int, struct { + int c; + }, union { + char d; + int e[5]; + }); +} (*fn_complex_t)(union { + void *f; + char g[16]; +}, struct { + int h; +}); + +typedef void (* (*signal_t)(int, void (*)(int)))(int); + +typedef char * (*fn_ptr_arr1_t[10])(int **); + +typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int)); + +struct struct_w_typedefs { + int_t a; + crazy_ptr_t b; + we_need_to_go_deeper_ptr_t c; + how_about_this_ptr_t d; + ptr_arr_t e; + fn_ptr1_t f; + printf_fn_t g; + fn_ptr2_t h; + fn_complex_t i; + signal_t j; + fn_ptr_arr1_t k; + fn_ptr_arr2_t l; +}; + +typedef struct { + int x; + int y; + int z; +} anon_struct_t; + +struct struct_fwd; + +typedef struct struct_fwd struct_fwd_t; + +typedef struct struct_fwd *struct_fwd_ptr_t; + +union union_fwd; + +typedef union union_fwd union_fwd_t; + +typedef union union_fwd *union_fwd_ptr_t; + +struct struct_empty {}; + +struct struct_simple { + int a; + char b; + const int_t *p; + struct struct_empty s; + enum e2 e; + enum { + ANON_VAL1 = 1, + ANON_VAL2 = 2, + } f; + int arr1[13]; + enum e2 arr2[5]; +}; + +union union_empty {}; + +union union_simple { + void *ptr; + int num; + int_t num2; + union union_empty u; +}; + +struct struct_in_struct { + struct struct_simple simple; + union union_simple also_simple; + struct { + int a; + } not_so_hard_as_well; + union { + int b; + int c; + } anon_union_is_good; + struct { + int d; + int e; + }; + union { + int f; + int g; + }; +}; + +struct struct_with_embedded_stuff { + int a; + struct { + int b; + struct { + struct struct_with_embedded_stuff *c; + const char *d; + } e; + union { + volatile long int f; + void * restrict g; + }; + }; + union { + const int_t *h; + void (*i)(char, int, void *); + } j; + enum { + K = 100, + L = 200, + } m; + char n[16]; + struct { + char o; + int p; + void (*q)(int); + } r[5]; + struct struct_in_struct s[10]; + int t[11]; +}; + +struct root_struct { + enum e1 _1; + enum e2 _2; + e2_t _2_1; + e3_t _2_2; + struct struct_w_typedefs _3; + anon_struct_t _7; + struct struct_fwd *_8; + struct_fwd_t *_9; + struct_fwd_ptr_t _10; + union union_fwd *_11; + union_fwd_t *_12; + union_fwd_ptr_t _13; + struct struct_with_embedded_stuff _14; +}; + +/* ------ END-EXPECTED-OUTPUT ------ */ + +int f(struct root_struct *s) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c new file mode 100644 index 000000000000..dea395af9ea9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/loop1.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +SEC("raw_tracepoint/kfree_skb") +int nested_loops(volatile struct pt_regs* ctx) +{ + int i, j, sum = 0, m; + + for (j = 0; j < 300; j++) + for (i = 0; i < j; i++) { + if (j & 1) + m = ctx->rax; + else + m = j; + sum += i * m; + } + + return sum; +} diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c new file mode 100644 
index 000000000000..0637bd8e8bcf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/loop2.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +SEC("raw_tracepoint/consume_skb") +int while_true(volatile struct pt_regs* ctx) +{ + int i = 0; + + while (true) { + if (ctx->rax & 1) + i += 3; + else + i += 7; + if (i > 40) + break; + } + + return i; +} diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c new file mode 100644 index 000000000000..30a0f6cba080 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/loop3.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +SEC("raw_tracepoint/consume_skb") +int while_true(volatile struct pt_regs* ctx) +{ + __u64 i = 0, sum = 0; + do { + i++; + sum += ctx->rax; + } while (i < 0x100000000ULL); + return sum; +} diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index 9f741e69cebe..a25c82a5b7c8 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -10,24 +10,22 @@ #define REFRESH_TIME_NS 100000000 #define NS_PER_SEC 1000000000 -struct bpf_map_def SEC("maps") percpu_netcnt = { +struct { + __u32 type; + struct bpf_cgroup_storage_key *key; + struct percpu_net_cnt *value; +} percpu_netcnt SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - .key_size = sizeof(struct bpf_cgroup_storage_key), - .value_size = sizeof(struct percpu_net_cnt), }; -BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key, - struct percpu_net_cnt); - -struct bpf_map_def SEC("maps") netcnt = { +struct { + __u32 type; + struct bpf_cgroup_storage_key *key; + struct net_cnt *value; +} netcnt SEC(".maps") = { .type = BPF_MAP_TYPE_CGROUP_STORAGE, - .key_size = sizeof(struct bpf_cgroup_storage_key), - .value_size = sizeof(struct net_cnt), }; -BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key, - struct net_cnt); - SEC("cgroup/skb") int bpf_nextcnt(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h new file mode 100644 index 000000000000..6b0781391be5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <linux/bpf.h> +#include "bpf_helpers.h" + +#define FUNCTION_NAME_LEN 64 +#define FILE_NAME_LEN 128 +#define TASK_COMM_LEN 16 + +typedef struct { + int PyThreadState_frame; + int PyThreadState_thread; + int PyFrameObject_back; + int PyFrameObject_code; + int PyFrameObject_lineno; + int PyCodeObject_filename; + int PyCodeObject_name; + int String_data; + int String_size; +} OffsetConfig; + +typedef struct { + uintptr_t current_state_addr; + uintptr_t tls_key_addr; + OffsetConfig offsets; + bool use_tls; +} PidData; + +typedef struct { + uint32_t success; +} Stats; + +typedef struct { + char name[FUNCTION_NAME_LEN]; + 
char file[FILE_NAME_LEN]; +} Symbol; + +typedef struct { + uint32_t pid; + uint32_t tid; + char comm[TASK_COMM_LEN]; + int32_t kernel_stack_id; + int32_t user_stack_id; + bool thread_current; + bool pthread_match; + bool stack_complete; + int16_t stack_len; + int32_t stack[STACK_MAX_LEN]; + + int has_meta; + int metadata; + char dummy_safeguard; +} Event; + + +struct bpf_elf_map { + __u32 type; + __u32 size_key; + __u32 size_value; + __u32 max_elem; + __u32 flags; +}; + +typedef int pid_t; + +typedef struct { + void* f_back; // PyFrameObject.f_back, previous frame + void* f_code; // PyFrameObject.f_code, pointer to PyCodeObject + void* co_filename; // PyCodeObject.co_filename + void* co_name; // PyCodeObject.co_name +} FrameData; + +static inline __attribute__((__always_inline__)) void* +get_thread_state(void* tls_base, PidData* pidData) +{ + void* thread_state; + int key; + + bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); + bpf_probe_read(&thread_state, sizeof(thread_state), + tls_base + 0x310 + key * 0x10 + 0x08); + return thread_state; +} + +static inline __attribute__((__always_inline__)) bool +get_frame_data(void* frame_ptr, PidData* pidData, FrameData* frame, Symbol* symbol) +{ + // read data from PyFrameObject + bpf_probe_read(&frame->f_back, + sizeof(frame->f_back), + frame_ptr + pidData->offsets.PyFrameObject_back); + bpf_probe_read(&frame->f_code, + sizeof(frame->f_code), + frame_ptr + pidData->offsets.PyFrameObject_code); + + // read data from PyCodeObject + if (!frame->f_code) + return false; + bpf_probe_read(&frame->co_filename, + sizeof(frame->co_filename), + frame->f_code + pidData->offsets.PyCodeObject_filename); + bpf_probe_read(&frame->co_name, + sizeof(frame->co_name), + frame->f_code + pidData->offsets.PyCodeObject_name); + // read actual names into symbol + if (frame->co_filename) + bpf_probe_read_str(&symbol->file, + sizeof(symbol->file), + frame->co_filename + pidData->offsets.String_data); + if (frame->co_name) + bpf_probe_read_str(&symbol->name, + sizeof(symbol->name), + frame->co_name + pidData->offsets.String_data); + return true; +} + +struct bpf_elf_map SEC("maps") pidmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(int), + .size_value = sizeof(PidData), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") eventmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(int), + .size_value = sizeof(Event), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") symbolmap = { + .type = BPF_MAP_TYPE_HASH, + .size_key = sizeof(Symbol), + .size_value = sizeof(int), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") statsmap = { + .type = BPF_MAP_TYPE_ARRAY, + .size_key = sizeof(Stats), + .size_value = sizeof(int), + .max_elem = 1, +}; + +struct bpf_elf_map SEC("maps") perfmap = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .size_key = sizeof(int), + .size_value = sizeof(int), + .max_elem = 32, +}; + +struct bpf_elf_map SEC("maps") stackmap = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .size_key = sizeof(int), + .size_value = sizeof(long long) * 127, + .max_elem = 1000, +}; + +static inline __attribute__((__always_inline__)) int __on_event(struct pt_regs *ctx) +{ + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + pid_t pid = (pid_t)(pid_tgid >> 32); + PidData* pidData = bpf_map_lookup_elem(&pidmap, &pid); + if (!pidData) + return 0; + + int zero = 0; + Event* event = bpf_map_lookup_elem(&eventmap, &zero); + if (!event) + return 0; + + event->pid = pid; + + event->tid = (pid_t)pid_tgid; + bpf_get_current_comm(&event->comm, 
sizeof(event->comm)); + + event->user_stack_id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK); + event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0); + + void* thread_state_current = (void*)0; + bpf_probe_read(&thread_state_current, + sizeof(thread_state_current), + (void*)(long)pidData->current_state_addr); + + struct task_struct* task = (struct task_struct*)bpf_get_current_task(); + void* tls_base = (void*)task; + + void* thread_state = pidData->use_tls ? get_thread_state(tls_base, pidData) + : thread_state_current; + event->thread_current = thread_state == thread_state_current; + + if (pidData->use_tls) { + uint64_t pthread_created; + uint64_t pthread_self; + bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10); + + bpf_probe_read(&pthread_created, + sizeof(pthread_created), + thread_state + pidData->offsets.PyThreadState_thread); + event->pthread_match = pthread_created == pthread_self; + } else { + event->pthread_match = 1; + } + + if (event->pthread_match || !pidData->use_tls) { + void* frame_ptr; + FrameData frame; + Symbol sym = {}; + int cur_cpu = bpf_get_smp_processor_id(); + + bpf_probe_read(&frame_ptr, + sizeof(frame_ptr), + thread_state + pidData->offsets.PyThreadState_frame); + + int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); + if (symbol_counter == NULL) + return 0; +#ifdef NO_UNROLL +#pragma clang loop unroll(disable) +#else +#pragma clang loop unroll(full) +#endif + /* Unwind python stack */ + for (int i = 0; i < STACK_MAX_LEN; ++i) { + if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { + int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; + int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); + if (!symbol_id) { + bpf_map_update_elem(&symbolmap, &sym, &zero, 0); + symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); + if (!symbol_id) + return 0; + } + if (*symbol_id == new_symbol_id) + (*symbol_counter)++; + event->stack[i] = *symbol_id; + event->stack_len = i + 1; + frame_ptr = frame.f_back; + } + } + event->stack_complete = frame_ptr == NULL; + } else { + event->stack_complete = 1; + } + + Stats* stats = bpf_map_lookup_elem(&statsmap, &zero); + if (stats) + stats->success++; + + event->has_meta = 0; + bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata)); + return 0; +} + +SEC("raw_tracepoint/kfree_skb") +int on_event(struct pt_regs* ctx) +{ + int i, ret = 0; + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + ret |= __on_event(ctx); + return ret; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/pyperf100.c b/tools/testing/selftests/bpf/progs/pyperf100.c new file mode 100644 index 000000000000..29786325db54 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf100.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 100 +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c new file mode 100644 index 000000000000..c39f559d3100 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf180.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 180 +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf50.c b/tools/testing/selftests/bpf/progs/pyperf50.c new file mode 100644 index 000000000000..ef7ce340a292 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf50.c @@ 
-0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 50 +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c new file mode 100644 index 000000000000..cb49b89e37cd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 600 +/* clang will not unroll the loop 600 times. + * Instead it will unroll it to the amount it deemed + * appropriate, but the loop will still execute 600 times. + * Total program size is around 90k insns + */ +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c new file mode 100644 index 000000000000..6beff7502f4d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#define STACK_MAX_LEN 600 +#define NO_UNROLL +/* clang will not unroll at all. + * Total program size is around 2k insns + */ +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c index 9ff8ac4b0bf6..6aabb681fb9a 100644 --- a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c +++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c @@ -7,25 +7,36 @@ #include "bpf_helpers.h" #include "bpf_endian.h" -struct bpf_map_def SEC("maps") socket_cookies = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__u64), - .value_size = sizeof(__u32), - .max_entries = 1 << 8, +struct socket_cookie { + __u64 cookie_key; + __u32 cookie_value; +}; + +struct { + __u32 type; + __u32 map_flags; + int *key; + struct socket_cookie *value; +} socket_cookies SEC(".maps") = { + .type = BPF_MAP_TYPE_SK_STORAGE, + .map_flags = BPF_F_NO_PREALLOC, }; SEC("cgroup/connect6") int set_cookie(struct bpf_sock_addr *ctx) { - __u32 cookie_value = 0xFF; - __u64 cookie_key; + struct socket_cookie *p; if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6) return 1; - cookie_key = bpf_get_socket_cookie(ctx); - if (bpf_map_update_elem(&socket_cookies, &cookie_key, &cookie_value, 0)) - return 0; + p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!p) + return 1; + + p->cookie_value = 0xFF; + p->cookie_key = bpf_get_socket_cookie(ctx); return 1; } @@ -33,9 +44,8 @@ int set_cookie(struct bpf_sock_addr *ctx) SEC("sockops") int update_cookie(struct bpf_sock_ops *ctx) { - __u32 new_cookie_value; - __u32 *cookie_value; - __u64 cookie_key; + struct bpf_sock *sk; + struct socket_cookie *p; if (ctx->family != AF_INET6) return 1; @@ -43,14 +53,17 @@ int update_cookie(struct bpf_sock_ops *ctx) if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB) return 1; - cookie_key = bpf_get_socket_cookie(ctx); + if (!ctx->sk) + return 1; + + p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0); + if (!p) + return 1; - cookie_value = bpf_map_lookup_elem(&socket_cookies, &cookie_key); - if (!cookie_value) + if (p->cookie_key != bpf_get_socket_cookie(ctx)) return 1; - new_cookie_value = (ctx->local_port << 8) | *cookie_value; - bpf_map_update_elem(&socket_cookies, &cookie_key, &new_cookie_value, 0); + p->cookie_value = (ctx->local_port << 8) | p->cookie_value; return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c index 
0f92858f6226..9390e0244259 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c @@ -1,17 +1,9 @@ #include <linux/bpf.h> #include "bpf_helpers.h" -#include "bpf_util.h" #include "bpf_endian.h" int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sk_skb1") int bpf_prog1(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c index 12a7b5c82ed6..e80484d98a1a 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c @@ -1,17 +1,10 @@ #include <linux/bpf.h> + #include "bpf_helpers.h" -#include "bpf_util.h" #include "bpf_endian.h" int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - SEC("sk_msg1") int bpf_prog1(struct sk_msg_md *msg) { diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index 2ce7634a4012..d85c874ef25e 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -1,17 +1,9 @@ #include <linux/bpf.h> #include "bpf_helpers.h" -#include "bpf_util.h" #include "bpf_endian.h" int _version SEC("version") = 1; -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - struct bpf_map_def SEC("maps") sock_map_rx = { .type = BPF_MAP_TYPE_SOCKMAP, .key_size = sizeof(int), diff --git a/tools/testing/selftests/bpf/progs/strobemeta.c b/tools/testing/selftests/bpf/progs/strobemeta.c new file mode 100644 index 000000000000..d3df3d86f092 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +// Copyright (c) 2019 Facebook + +#define STROBE_MAX_INTS 2 +#define STROBE_MAX_STRS 25 +#define STROBE_MAX_MAPS 100 +#define STROBE_MAX_MAP_ENTRIES 20 +/* full unroll by llvm #undef NO_UNROLL */ +#include "strobemeta.h" + diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h new file mode 100644 index 000000000000..1ff73f60a3e4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> +#include <linux/bpf.h> +#include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/types.h> +#include "bpf_helpers.h" + +typedef uint32_t pid_t; +struct task_struct {}; + +#define TASK_COMM_LEN 16 +#define PERF_MAX_STACK_DEPTH 127 + +#define STROBE_TYPE_INVALID 0 +#define STROBE_TYPE_INT 1 +#define STROBE_TYPE_STR 2 +#define STROBE_TYPE_MAP 3 + +#define STACK_TABLE_EPOCH_SHIFT 20 +#define STROBE_MAX_STR_LEN 1 +#define STROBE_MAX_CFGS 32 +#define STROBE_MAX_PAYLOAD \ + (STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \ + STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN) + +struct strobe_value_header { + /* + * meaning depends on type: + * 1. int: 0, if value not set, 1 otherwise + * 2. str: 1 always, whether value is set or not is determined by ptr + * 3. 
map: 1 always, pointer points to additional struct with number + * of entries (up to STROBE_MAX_MAP_ENTRIES) + */ + uint16_t len; + /* + * _reserved might be used for some future fields/flags, but we always + * want to keep strobe_value_header to be 8 bytes, so BPF can read 16 + * bytes in one go and get both header and value + */ + uint8_t _reserved[6]; +}; + +/* + * strobe_value_generic is used from BPF probe only, but needs to be a union + * of strobe_value_int/strobe_value_str/strobe_value_map + */ +struct strobe_value_generic { + struct strobe_value_header header; + union { + int64_t val; + void *ptr; + }; +}; + +struct strobe_value_int { + struct strobe_value_header header; + int64_t value; +}; + +struct strobe_value_str { + struct strobe_value_header header; + const char* value; +}; + +struct strobe_value_map { + struct strobe_value_header header; + const struct strobe_map_raw* value; +}; + +struct strobe_map_entry { + const char* key; + const char* val; +}; + +/* + * Map of C-string key/value pairs with fixed maximum capacity. Each map has + * corresponding int64 ID, which application can use (or ignore) in whatever + * way appropriate. Map is "write-only", there is no way to get data out of + * map. Map is intended to be used to provide metadata for profilers and is + * not to be used for internal in-app communication. All methods are + * thread-safe. + */ +struct strobe_map_raw { + /* + * general purpose unique ID that's up to application to decide + * whether and how to use; for request metadata use case id is unique + * request ID that's used to match metadata with stack traces on + * Strobelight backend side + */ + int64_t id; + /* number of used entries in map */ + int64_t cnt; + /* + * having volatile doesn't change anything on BPF side, but clang + * emits warnings for passing `volatile const char *` into + * bpf_probe_read_str that expects just `const char *` + */ + const char* tag; + /* + * key/value entries, each consisting of 2 pointers to key and value + * C strings + */ + struct strobe_map_entry entries[STROBE_MAX_MAP_ENTRIES]; +}; + +/* Following values define supported values of TLS mode */ +#define TLS_NOT_SET -1 +#define TLS_LOCAL_EXEC 0 +#define TLS_IMM_EXEC 1 +#define TLS_GENERAL_DYN 2 + +/* + * structure that universally represents TLS location (both for static + * executables and shared libraries) + */ +struct strobe_value_loc { + /* + * tls_mode defines what TLS mode was used for particular metavariable: + * - -1 (TLS_NOT_SET) - no metavariable; + * - 0 (TLS_LOCAL_EXEC) - Local Executable mode; + * - 1 (TLS_IMM_EXEC) - Immediate Executable mode; + * - 2 (TLS_GENERAL_DYN) - General Dynamic mode; + * Local Dynamic mode is not yet supported, because it was never seen in + * practice. Mode defines how offset field is interpreted. See + * calc_location() below for details. + */ + int64_t tls_mode; + /* + * TLS_LOCAL_EXEC: offset from thread pointer (fs:0 for x86-64, + * tpidr_el0 for aarch64).
+ * TLS_IMM_EXEC: absolute address of GOT entry containing offset + * from thread pointer; + * TLS_GENERAL_DYN: absolute address of double GOT entry + * containing tls_index_t struct; + */ + int64_t offset; +}; + +struct strobemeta_cfg { + int64_t req_meta_idx; + struct strobe_value_loc int_locs[STROBE_MAX_INTS]; + struct strobe_value_loc str_locs[STROBE_MAX_STRS]; + struct strobe_value_loc map_locs[STROBE_MAX_MAPS]; +}; + +struct strobe_map_descr { + uint64_t id; + int16_t tag_len; + /* + * cnt <0 - map value isn't set; + * 0 - map has id set, but no key/value entries + */ + int16_t cnt; + /* + * both key_lens[i] and val_lens[i] should be >0 for present key/value + * entry + */ + uint16_t key_lens[STROBE_MAX_MAP_ENTRIES]; + uint16_t val_lens[STROBE_MAX_MAP_ENTRIES]; +}; + +struct strobemeta_payload { + /* req_id has valid request ID, if req_meta_valid == 1 */ + int64_t req_id; + uint8_t req_meta_valid; + /* + * mask has Nth bit set to 1, if Nth metavar was present and + * successfully read + */ + uint64_t int_vals_set_mask; + int64_t int_vals[STROBE_MAX_INTS]; + /* len is >0 for present values */ + uint16_t str_lens[STROBE_MAX_STRS]; + /* if map_descrs[i].cnt == -1, metavar is not present/set */ + struct strobe_map_descr map_descrs[STROBE_MAX_MAPS]; + /* + * payload has compactly packed values of str and map variables in the + * form: strval1\0strval2\0map1key1\0map1val1\0map2key1\0map2val1\0 + * (and so on); str_lens[i], key_lens[i] and val_lens[i] determine + * value length + */ + char payload[STROBE_MAX_PAYLOAD]; +}; + +struct strobelight_bpf_sample { + uint64_t ktime; + char comm[TASK_COMM_LEN]; + pid_t pid; + int user_stack_id; + int kernel_stack_id; + int has_meta; + struct strobemeta_payload metadata; + /* + * makes it possible to pass (<real payload size> + 1) as data size to + * perf_submit() to avoid perf_submit's paranoia about passing zero as + * size, as it deduces that <real payload size> might be + * **theoretically** zero + */ + char dummy_safeguard; +}; + +struct bpf_map_def SEC("maps") samples = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 32, +}; + +struct bpf_map_def SEC("maps") stacks_0 = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(uint32_t), + .value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH, + .max_entries = 16, +}; + +struct bpf_map_def SEC("maps") stacks_1 = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(uint32_t), + .value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH, + .max_entries = 16, +}; + +struct bpf_map_def SEC("maps") sample_heap = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(uint32_t), + .value_size = sizeof(struct strobelight_bpf_sample), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") strobemeta_cfgs = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(pid_t), + .value_size = sizeof(struct strobemeta_cfg), + .max_entries = STROBE_MAX_CFGS, +}; + +/* Type for the dtv. */ +/* https://github.com/lattera/glibc/blob/master/nptl/sysdeps/x86_64/tls.h#L34 */ +typedef union dtv { + size_t counter; + struct { + void* val; + bool is_static; + } pointer; +} dtv_t; + +/* Partial definition for tcbhead_t */ +/* https://github.com/bminor/glibc/blob/master/sysdeps/x86_64/nptl/tls.h#L42 */ +struct tcbhead { + void* tcb; + dtv_t* dtv; +}; + +/* + * TLS module/offset information for shared library case. + * For x86-64, this is mapped onto two entries in GOT. + * For aarch64, this is pointed to by second GOT entry.
+ */ +struct tls_index { + uint64_t module; + uint64_t offset; +}; + +static inline __attribute__((always_inline)) +void *calc_location(struct strobe_value_loc *loc, void *tls_base) +{ + /* + * tls_mode value is: + * - -1 (TLS_NOT_SET), if no metavar is present; + * - 0 (TLS_LOCAL_EXEC), if metavar uses Local Executable mode of TLS + * (offset from fs:0 for x86-64 or tpidr_el0 for aarch64); + * - 1 (TLS_IMM_EXEC), if metavar uses Immediate Executable mode of TLS; + * - 2 (TLS_GENERAL_DYN), if metavar uses General Dynamic mode of TLS; + * This scheme allows using something like: + * (tls_mode + 1) * (tls_base + offset) + * to get NULL for "no metavar" location, or correct pointer for local + * executable mode without doing extra ifs. + */ + if (loc->tls_mode <= TLS_LOCAL_EXEC) { + /* static executable is simple, we just have offset from + * tls_base */ + void *addr = tls_base + loc->offset; + /* multiply by (tls_mode + 1) to get NULL, if we have no + * metavar in this slot */ + return (void *)((loc->tls_mode + 1) * (int64_t)addr); + } + /* + * Other modes are more complicated; we need to jump through a few + * hoops. + * + * For immediate executable mode (currently supported only for aarch64): + * - loc->offset is pointing to a GOT entry containing fixed offset + * relative to tls_base; + * + * For general dynamic mode: + * - loc->offset is pointing to the beginning of double GOT entries; + * - (for aarch64 only) second entry points to tls_index_t struct; + * - (for x86-64 only) two GOT entries are already tls_index_t; + * - tls_index_t->module is used to find start of TLS section in + * which variable resides; + * - tls_index_t->offset provides offset within that TLS section, + * pointing to value of variable. + */ + struct tls_index tls_index; + dtv_t *dtv; + void *tls_ptr; + + bpf_probe_read(&tls_index, sizeof(struct tls_index), + (void *)loc->offset); + /* valid module index is always positive */ + if (tls_index.module > 0) { + /* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */ + bpf_probe_read(&dtv, sizeof(dtv), + &((struct tcbhead *)tls_base)->dtv); + dtv += tls_index.module; + } else { + dtv = NULL; + } + bpf_probe_read(&tls_ptr, sizeof(void *), dtv); + /* if pointer has (void *)-1 value, then TLS wasn't initialized yet */ + return tls_ptr && tls_ptr != (void *)-1 + ? 
tls_ptr + tls_index.offset + : NULL; +} + +static inline __attribute__((always_inline)) +void read_int_var(struct strobemeta_cfg *cfg, size_t idx, void *tls_base, + struct strobe_value_generic *value, + struct strobemeta_payload *data) +{ + void *location = calc_location(&cfg->int_locs[idx], tls_base); + if (!location) + return; + + bpf_probe_read(value, sizeof(struct strobe_value_generic), location); + data->int_vals[idx] = value->val; + if (value->header.len) + data->int_vals_set_mask |= (1 << idx); +} + +static inline __attribute__((always_inline)) +uint64_t read_str_var(struct strobemeta_cfg* cfg, size_t idx, void *tls_base, + struct strobe_value_generic *value, + struct strobemeta_payload *data, void *payload) +{ + void *location; + uint32_t len; + + data->str_lens[idx] = 0; + location = calc_location(&cfg->str_locs[idx], tls_base); + if (!location) + return 0; + + bpf_probe_read(value, sizeof(struct strobe_value_generic), location); + len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr); + /* + * if bpf_probe_read_str returns error (<0), due to casting to + * unsigned int, it will become a big number, so next check is + * sufficient to check for errors AND prove to BPF verifier, that + * bpf_probe_read_str won't return anything bigger than + * STROBE_MAX_STR_LEN + */ + if (len > STROBE_MAX_STR_LEN) + return 0; + + data->str_lens[idx] = len; + return len; +} + +static inline __attribute__((always_inline)) +void *read_map_var(struct strobemeta_cfg *cfg, size_t idx, void *tls_base, + struct strobe_value_generic *value, + struct strobemeta_payload* data, void *payload) +{ + struct strobe_map_descr* descr = &data->map_descrs[idx]; + struct strobe_map_raw map; + void *location; + uint32_t len; + + descr->tag_len = 0; /* presume no tag is set */ + descr->cnt = -1; /* presume no value is set */ + + location = calc_location(&cfg->map_locs[idx], tls_base); + if (!location) + return payload; + + bpf_probe_read(value, sizeof(struct strobe_value_generic), location); + if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr)) + return payload; + + descr->id = map.id; + descr->cnt = map.cnt; + if (cfg->req_meta_idx == idx) { + data->req_id = map.id; + data->req_meta_valid = 1; + } + + len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag); + if (len <= STROBE_MAX_STR_LEN) { + descr->tag_len = len; + payload += len; + } + +#ifdef NO_UNROLL +#pragma clang loop unroll(disable) +#else +#pragma unroll +#endif + for (int i = 0; i < STROBE_MAX_MAP_ENTRIES && i < map.cnt; ++i) { + descr->key_lens[i] = 0; + len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].key); + if (len <= STROBE_MAX_STR_LEN) { + descr->key_lens[i] = len; + payload += len; + } + descr->val_lens[i] = 0; + len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].val); + if (len <= STROBE_MAX_STR_LEN) { + descr->val_lens[i] = len; + payload += len; + } + } + + return payload; +} + +/* + * read_strobe_meta returns NULL if no metadata was read; otherwise returns + * pointer to *right after* payload ends + */ +static inline __attribute__((always_inline)) +void *read_strobe_meta(struct task_struct* task, + struct strobemeta_payload* data) { + pid_t pid = bpf_get_current_pid_tgid() >> 32; + struct strobe_value_generic value = {0}; + struct strobemeta_cfg *cfg; + void *tls_base, *payload; + + cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid); + if (!cfg) + return NULL; + + data->int_vals_set_mask = 0; + data->req_meta_valid = 0; + payload = data->payload; + /* + * we don't 
have struct task_struct definition, it should be: + * tls_base = (void *)task->thread.fsbase; + */ + tls_base = (void *)task; + +#ifdef NO_UNROLL +#pragma clang loop unroll(disable) +#else +#pragma unroll +#endif + for (int i = 0; i < STROBE_MAX_INTS; ++i) { + read_int_var(cfg, i, tls_base, &value, data); + } +#ifdef NO_UNROLL +#pragma clang loop unroll(disable) +#else +#pragma unroll +#endif + for (int i = 0; i < STROBE_MAX_STRS; ++i) { + payload += read_str_var(cfg, i, tls_base, &value, data, payload); + } +#ifdef NO_UNROLL +#pragma clang loop unroll(disable) +#else +#pragma unroll +#endif + for (int i = 0; i < STROBE_MAX_MAPS; ++i) { + payload = read_map_var(cfg, i, tls_base, &value, data, payload); + } + /* + * return pointer right after end of payload, so it's possible to + * calculate exact amount of useful data that needs to be sent + */ + return payload; +} + +SEC("raw_tracepoint/kfree_skb") +int on_event(struct pt_regs *ctx) { + pid_t pid = bpf_get_current_pid_tgid() >> 32; + struct strobelight_bpf_sample* sample; + struct task_struct *task; + uint32_t zero = 0; + uint64_t ktime_ns; + void *sample_end; + + sample = bpf_map_lookup_elem(&sample_heap, &zero); + if (!sample) + return 0; /* this will never happen */ + + sample->pid = pid; + bpf_get_current_comm(&sample->comm, TASK_COMM_LEN); + ktime_ns = bpf_ktime_get_ns(); + sample->ktime = ktime_ns; + + task = (struct task_struct *)bpf_get_current_task(); + sample_end = read_strobe_meta(task, &sample->metadata); + sample->has_meta = sample_end != NULL; + sample_end = sample_end ? : &sample->metadata; + + if ((ktime_ns >> STACK_TABLE_EPOCH_SHIFT) & 1) { + sample->kernel_stack_id = bpf_get_stackid(ctx, &stacks_1, 0); + sample->user_stack_id = bpf_get_stackid(ctx, &stacks_1, BPF_F_USER_STACK); + } else { + sample->kernel_stack_id = bpf_get_stackid(ctx, &stacks_0, 0); + sample->user_stack_id = bpf_get_stackid(ctx, &stacks_0, BPF_F_USER_STACK); + } + + uint64_t sample_size = sample_end - (void *)sample; + /* should always be true */ + if (sample_size < sizeof(struct strobelight_bpf_sample)) + bpf_perf_event_output(ctx, &samples, 0, sample, 1 + sample_size); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c b/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c new file mode 100644 index 000000000000..f0a1669e11d6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +// Copyright (c) 2019 Facebook + +#define STROBE_MAX_INTS 2 +#define STROBE_MAX_STRS 25 +#define STROBE_MAX_MAPS 13 +#define STROBE_MAX_MAP_ENTRIES 20 +#define NO_UNROLL +#include "strobemeta.h" diff --git a/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c b/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c new file mode 100644 index 000000000000..4291a7d642e7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +// Copyright (c) 2019 Facebook + +#define STROBE_MAX_INTS 2 +#define STROBE_MAX_STRS 25 +#define STROBE_MAX_MAPS 30 +#define STROBE_MAX_MAP_ENTRIES 20 +#define NO_UNROLL +#include "strobemeta.h" diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c new file mode 100644 index 000000000000..28c16bb583b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +struct ipv_counts { + unsigned int v4; + unsigned int v6; +}; + +/* just to validate we can handle maps in multiple sections */ +struct bpf_map_def SEC("maps") btf_map_legacy = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(long long), + .max_entries = 4, +}; + +BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts); + +struct { + int *key; + struct ipv_counts *value; + unsigned int type; + unsigned int max_entries; +} btf_map SEC(".maps") = { + .type = BPF_MAP_TYPE_ARRAY, + .max_entries = 4, +}; + +struct dummy_tracepoint_args { + unsigned long long pad; + struct sock *sock; +}; + +__attribute__((noinline)) +static int test_long_fname_2(struct dummy_tracepoint_args *arg) +{ + struct ipv_counts *counts; + int key = 0; + + if (!arg->sock) + return 0; + + counts = bpf_map_lookup_elem(&btf_map, &key); + if (!counts) + return 0; + + counts->v6++; + + /* just verify we can reference both maps */ + counts = bpf_map_lookup_elem(&btf_map_legacy, &key); + if (!counts) + return 0; + + return 0; +} + +__attribute__((noinline)) +static int test_long_fname_1(struct dummy_tracepoint_args *arg) +{ + return test_long_fname_2(arg); +} + +SEC("dummy_tracepoint") +int _dummy_tracepoint(struct dummy_tracepoint_args *arg) +{ + return test_long_fname_1(arg); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c index f6d9f238e00a..aaa6ec250e15 100644 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c @@ -15,17 +15,25 @@ struct stack_trace_t { struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; }; -struct bpf_map_def SEC("maps") perfmap = { +struct { + __u32 type; + __u32 max_entries; + __u32 key_size; + __u32 value_size; +} perfmap SEC(".maps") = { .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .max_entries = 2, .key_size = sizeof(int), .value_size = sizeof(__u32), - .max_entries = 2, }; -struct bpf_map_def SEC("maps") stackdata_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct stack_trace_t *value; +} stackdata_map SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct stack_trace_t), .max_entries = 1, }; @@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = { * issue and avoid complicated C programming massaging. * This is an acceptable workaround since there is one entry here. 
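 * (Editor's note: in the BTF-defined replacement below, the value type
 * is spelled as `__u64 (*value)[2 * MAX_STACK_RAWTP]` - a pointer to
 * an array - so libbpf can derive value_size from the pointed-to BTF
 * type while the member itself stays pointer-sized in the object file.)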
*/ -struct bpf_map_def SEC("maps") rawdata_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 (*value)[2 * MAX_STACK_RAWTP]; +} rawdata_map SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2, .max_entries = 1, }; diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c index 5ab14e941980..866cc7ddbe43 100644 --- a/tools/testing/selftests/bpf/progs/test_global_data.c +++ b/tools/testing/selftests/bpf/progs/test_global_data.c @@ -7,17 +7,23 @@ #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") result_number = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 *value; +} result_number SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), .max_entries = 11, }; -struct bpf_map_def SEC("maps") result_string = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + const char (*value)[32]; +} result_string SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = 32, .max_entries = 5, }; @@ -27,10 +33,13 @@ struct foo { __u64 c; }; -struct bpf_map_def SEC("maps") result_struct = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct foo *value; +} result_struct SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct foo), .max_entries = 5, }; diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c index 1e10c9590991..848cbb90f581 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb.c @@ -169,38 +169,53 @@ struct eth_hdr { unsigned short eth_proto; }; -struct bpf_map_def SEC("maps") vip_map = { +struct { + __u32 type; + __u32 max_entries; + struct vip *key; + struct vip_meta *value; +} vip_map SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip), - .value_size = sizeof(struct vip_meta), .max_entries = MAX_VIPS, }; -struct bpf_map_def SEC("maps") ch_rings = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} ch_rings SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = CH_RINGS_SIZE, }; -struct bpf_map_def SEC("maps") reals = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct real_definition *value; +} reals SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct real_definition), .max_entries = MAX_REALS, }; -struct bpf_map_def SEC("maps") stats = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct vip_stats *value; +} stats SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct vip_stats), .max_entries = MAX_VIPS, }; -struct bpf_map_def SEC("maps") ctl_array = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct ctl_value *value; +} ctl_array SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct ctl_value), .max_entries = CTL_MAP_SIZE, }; diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c index ba44a14e6dc4..c63ecf3ca573 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c @@ -165,38 
+165,53 @@ struct eth_hdr { unsigned short eth_proto; }; -struct bpf_map_def SEC("maps") vip_map = { +struct { + __u32 type; + __u32 max_entries; + struct vip *key; + struct vip_meta *value; +} vip_map SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip), - .value_size = sizeof(struct vip_meta), .max_entries = MAX_VIPS, }; -struct bpf_map_def SEC("maps") ch_rings = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} ch_rings SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = CH_RINGS_SIZE, }; -struct bpf_map_def SEC("maps") reals = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct real_definition *value; +} reals SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct real_definition), .max_entries = MAX_REALS, }; -struct bpf_map_def SEC("maps") stats = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct vip_stats *value; +} stats SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct vip_stats), .max_entries = MAX_VIPS, }; -struct bpf_map_def SEC("maps") ctl_array = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct ctl_value *value; +} ctl_array SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct ctl_value), .max_entries = CTL_MAP_SIZE, }; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c index 0575751bc1bc..7c7cb3177463 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c @@ -6,13 +6,6 @@ #include "bpf_helpers.h" #include "bpf_endian.h" -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - /* Packet parsing state machine helpers. 
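 * (Editor's usage sketch - see get_srh() in test_seg6_loop.c later in
 * this patch for the real thing:
 *   struct ip6_t *ip = cursor_advance(cursor, sizeof(*ip));
 * returns the old cursor position and advances the cursor by
 * sizeof(*ip).)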
*/ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c index af8cc68ed2f9..40d9c2853393 100644 --- a/tools/testing/selftests/bpf/progs/test_map_lock.c +++ b/tools/testing/selftests/bpf/progs/test_map_lock.c @@ -11,29 +11,31 @@ struct hmap_elem { int var[VAR_NUM]; }; -struct bpf_map_def SEC("maps") hash_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct hmap_elem *value; +} hash_map SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(int), - .value_size = sizeof(struct hmap_elem), .max_entries = 1, }; -BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem); - struct array_elem { struct bpf_spin_lock lock; int var[VAR_NUM]; }; -struct bpf_map_def SEC("maps") array_map = { +struct { + __u32 type; + __u32 max_entries; + int *key; + struct array_elem *value; +} array_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(struct array_elem), .max_entries = 1, }; -BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem); - SEC("map_lock_demo") int bpf_map_lock_test(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c new file mode 100644 index 000000000000..463964d79f73 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -0,0 +1,261 @@ +#include <stddef.h> +#include <inttypes.h> +#include <errno.h> +#include <linux/seg6_local.h> +#include <linux/bpf.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" + +/* Packet parsing state machine helpers. */ +#define cursor_advance(_cursor, _len) \ + ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) + +#define SR6_FLAG_ALERT (1 << 4) + +#define htonll(x) ((bpf_htonl(1)) == 1 ? (x) : ((uint64_t)bpf_htonl((x) & \ + 0xFFFFFFFF) << 32) | bpf_htonl((x) >> 32)) +#define ntohll(x) ((bpf_ntohl(1)) == 1 ? 
(x) : ((uint64_t)bpf_ntohl((x) & \ + 0xFFFFFFFF) << 32) | bpf_ntohl((x) >> 32)) +#define BPF_PACKET_HEADER __attribute__((packed)) + +struct ip6_t { + unsigned int ver:4; + unsigned int priority:8; + unsigned int flow_label:20; + unsigned short payload_len; + unsigned char next_header; + unsigned char hop_limit; + unsigned long long src_hi; + unsigned long long src_lo; + unsigned long long dst_hi; + unsigned long long dst_lo; +} BPF_PACKET_HEADER; + +struct ip6_addr_t { + unsigned long long hi; + unsigned long long lo; +} BPF_PACKET_HEADER; + +struct ip6_srh_t { + unsigned char nexthdr; + unsigned char hdrlen; + unsigned char type; + unsigned char segments_left; + unsigned char first_segment; + unsigned char flags; + unsigned short tag; + + struct ip6_addr_t segments[0]; +} BPF_PACKET_HEADER; + +struct sr6_tlv_t { + unsigned char type; + unsigned char len; + unsigned char value[0]; +} BPF_PACKET_HEADER; + +static __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb) +{ + void *cursor, *data_end; + struct ip6_srh_t *srh; + struct ip6_t *ip; + uint8_t *ipver; + + data_end = (void *)(long)skb->data_end; + cursor = (void *)(long)skb->data; + ipver = (uint8_t *)cursor; + + if ((void *)ipver + sizeof(*ipver) > data_end) + return NULL; + + if ((*ipver >> 4) != 6) + return NULL; + + ip = cursor_advance(cursor, sizeof(*ip)); + if ((void *)ip + sizeof(*ip) > data_end) + return NULL; + + if (ip->next_header != 43) + return NULL; + + srh = cursor_advance(cursor, sizeof(*srh)); + if ((void *)srh + sizeof(*srh) > data_end) + return NULL; + + if (srh->type != 4) + return NULL; + + return srh; +} + +static __attribute__((always_inline)) +int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad, + uint32_t old_pad, uint32_t pad_off) +{ + int err; + + if (new_pad != old_pad) { + err = bpf_lwt_seg6_adjust_srh(skb, pad_off, + (int) new_pad - (int) old_pad); + if (err) + return err; + } + + if (new_pad > 0) { + char pad_tlv_buf[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0}; + struct sr6_tlv_t *pad_tlv = (struct sr6_tlv_t *) pad_tlv_buf; + + pad_tlv->type = SR6_TLV_PADDING; + pad_tlv->len = new_pad - 2; + + err = bpf_lwt_seg6_store_bytes(skb, pad_off, + (void *)pad_tlv_buf, new_pad); + if (err) + return err; + } + + return 0; +} + +static __attribute__((always_inline)) +int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh, + uint32_t *tlv_off, uint32_t *pad_size, + uint32_t *pad_off) +{ + uint32_t srh_off, cur_off; + int offset_valid = 0; + int err; + + srh_off = (char *)srh - (char *)(long)skb->data; + // cur_off = end of segments, start of possible TLVs + cur_off = srh_off + sizeof(*srh) + + sizeof(struct ip6_addr_t) * (srh->first_segment + 1); + + *pad_off = 0; + + // we can only go as far as ~10 TLVs due to the BPF max stack size + #pragma clang loop unroll(disable) + for (int i = 0; i < 100; i++) { + struct sr6_tlv_t tlv; + + if (cur_off == *tlv_off) + offset_valid = 1; + + if (cur_off >= srh_off + ((srh->hdrlen + 1) << 3)) + break; + + err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv)); + if (err) + return err; + + if (tlv.type == SR6_TLV_PADDING) { + *pad_size = tlv.len + sizeof(tlv); + *pad_off = cur_off; + + if (*tlv_off == srh_off) { + *tlv_off = cur_off; + offset_valid = 1; + } + break; + + } else if (tlv.type == SR6_TLV_HMAC) { + break; + } + + cur_off += sizeof(tlv) + tlv.len; + } // we reached the padding or HMAC TLVs, or the end of the SRH + + if (*pad_off == 0) + *pad_off = cur_off; + + if (*tlv_off == -1) + *tlv_off = cur_off; + 
else if (!offset_valid) + return -EINVAL; + + return 0; +} + +static __attribute__((always_inline)) +int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off, + struct sr6_tlv_t *itlv, uint8_t tlv_size) +{ + uint32_t srh_off = (char *)srh - (char *)(long)skb->data; + uint8_t len_remaining, new_pad; + uint32_t pad_off = 0; + uint32_t pad_size = 0; + uint32_t partial_srh_len; + int err; + + if (tlv_off != -1) + tlv_off += srh_off; + + if (itlv->type == SR6_TLV_PADDING || itlv->type == SR6_TLV_HMAC) + return -EINVAL; + + err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off); + if (err) + return err; + + err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len); + if (err) + return err; + + err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size); + if (err) + return err; + + // the following can't be moved inside update_tlv_pad because the + // bpf verifier has some issues with it + pad_off += sizeof(*itlv) + itlv->len; + partial_srh_len = pad_off - srh_off; + len_remaining = partial_srh_len % 8; + new_pad = 8 - len_remaining; + + if (new_pad == 1) // cannot pad for 1 byte only + new_pad = 9; + else if (new_pad == 8) + new_pad = 0; + + return update_tlv_pad(skb, new_pad, pad_size, pad_off); +} + +// Add an Egress TLV fc00::4, add the flag A, +// and apply End.X action to fc42::1 +SEC("lwt_seg6local") +int __add_egr_x(struct __sk_buff *skb) +{ + unsigned long long hi = 0xfc42000000000000; + unsigned long long lo = 0x1; + struct ip6_srh_t *srh = get_srh(skb); + uint8_t new_flags = SR6_FLAG_ALERT; + struct ip6_addr_t addr; + int err, offset; + + if (srh == NULL) + return BPF_DROP; + + uint8_t tlv[20] = {2, 18, 0, 0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}; + + err = add_tlv(skb, srh, (srh->hdrlen+1) << 3, + (struct sr6_tlv_t *)&tlv, 20); + if (err) + return BPF_DROP; + + offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags); + err = bpf_lwt_seg6_store_bytes(skb, offset, + (void *)&new_flags, sizeof(new_flags)); + if (err) + return BPF_DROP; + + addr.lo = htonll(lo); + addr.hi = htonll(hi); + err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X, + (void *)&addr, sizeof(addr)); + if (err) + return BPF_DROP; + return BPF_REDIRECT; +} +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 5b54ec637ada..435a9527733e 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -21,38 +21,55 @@ int _version SEC("version") = 1; #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif -struct bpf_map_def SEC("maps") outer_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 key_size; + __u32 value_size; +} outer_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, + .max_entries = 1, .key_size = sizeof(__u32), .value_size = sizeof(__u32), - .max_entries = 1, }; -struct bpf_map_def SEC("maps") result_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} result_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = NR_RESULTS, }; -struct bpf_map_def SEC("maps") tmp_index_ovr_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + int *value; +} tmp_index_ovr_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - 
.value_size = sizeof(int), .max_entries = 1, }; -struct bpf_map_def SEC("maps") linum_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} linum_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 1, }; -struct bpf_map_def SEC("maps") data_check_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct data_check *value; +} data_check_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct data_check), .max_entries = 1, }; diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c new file mode 100644 index 000000000000..6ac68be5d68b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <linux/bpf.h> +#include <linux/version.h> +#include "bpf_helpers.h" + +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 *value; +} info_map SEC(".maps") = { + .type = BPF_MAP_TYPE_ARRAY, + .max_entries = 1, +}; + +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 *value; +} status_map SEC(".maps") = { + .type = BPF_MAP_TYPE_ARRAY, + .max_entries = 1, +}; + +SEC("send_signal_demo") +int bpf_send_signal_test(void *ctx) +{ + __u64 *info_val, *status_val; + __u32 key = 0, pid, sig; + int ret; + + status_val = bpf_map_lookup_elem(&status_map, &key); + if (!status_val || *status_val != 0) + return 0; + + info_val = bpf_map_lookup_elem(&info_map, &key); + if (!info_val || *info_val == 0) + return 0; + + sig = *info_val >> 32; + pid = *info_val & 0xffffFFFF; + + if ((bpf_get_current_pid_tgid() >> 32) == pid) { + ret = bpf_send_signal(sig); + if (ret == 0) + *status_val = 1; + } + + return 0; +} +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c index 1c39e4ccb7f1..c3d383d650cb 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c @@ -27,31 +27,43 @@ enum bpf_linum_array_idx { __NR_BPF_LINUM_ARRAY_IDX, }; -struct bpf_map_def SEC("maps") addr_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct sockaddr_in6 *value; +} addr_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct sockaddr_in6), .max_entries = __NR_BPF_ADDR_ARRAY_IDX, }; -struct bpf_map_def SEC("maps") sock_result_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct bpf_sock *value; +} sock_result_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct bpf_sock), .max_entries = __NR_BPF_RESULT_ARRAY_IDX, }; -struct bpf_map_def SEC("maps") tcp_sock_result_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct bpf_tcp_sock *value; +} tcp_sock_result_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct bpf_tcp_sock), .max_entries = __NR_BPF_RESULT_ARRAY_IDX, }; -struct bpf_map_def SEC("maps") linum_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} linum_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = __NR_BPF_LINUM_ARRAY_IDX, }; @@ 
-60,26 +72,26 @@ struct bpf_spinlock_cnt { __u32 cnt; }; -struct bpf_map_def SEC("maps") sk_pkt_out_cnt = { +struct { + __u32 type; + __u32 map_flags; + int *key; + struct bpf_spinlock_cnt *value; +} sk_pkt_out_cnt SEC(".maps") = { .type = BPF_MAP_TYPE_SK_STORAGE, - .key_size = sizeof(int), - .value_size = sizeof(struct bpf_spinlock_cnt), - .max_entries = 0, .map_flags = BPF_F_NO_PREALLOC, }; -BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt); - -struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = { +struct { + __u32 type; + __u32 map_flags; + int *key; + struct bpf_spinlock_cnt *value; +} sk_pkt_out_cnt10 SEC(".maps") = { .type = BPF_MAP_TYPE_SK_STORAGE, - .key_size = sizeof(int), - .value_size = sizeof(struct bpf_spinlock_cnt), - .max_entries = 0, .map_flags = BPF_F_NO_PREALLOC, }; -BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt); - static bool is_loopback6(__u32 *a6) { return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1); diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index 40f904312090..0a77ae36d981 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -10,30 +10,29 @@ struct hmap_elem { int test_padding; }; -struct bpf_map_def SEC("maps") hmap = { +struct { + __u32 type; + __u32 max_entries; + int *key; + struct hmap_elem *value; +} hmap SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(int), - .value_size = sizeof(struct hmap_elem), .max_entries = 1, }; -BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem); - - struct cls_elem { struct bpf_spin_lock lock; volatile int cnt; }; -struct bpf_map_def SEC("maps") cls_map = { +struct { + __u32 type; + struct bpf_cgroup_storage_key *key; + struct cls_elem *value; +} cls_map SEC(".maps") = { .type = BPF_MAP_TYPE_CGROUP_STORAGE, - .key_size = sizeof(struct bpf_cgroup_storage_key), - .value_size = sizeof(struct cls_elem), }; -BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key, - struct cls_elem); - struct bpf_vqueue { struct bpf_spin_lock lock; /* 4 byte hole */ @@ -42,14 +41,16 @@ struct bpf_vqueue { unsigned int rate; }; -struct bpf_map_def SEC("maps") vqueue = { +struct { + __u32 type; + __u32 max_entries; + int *key; + struct bpf_vqueue *value; +} vqueue SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(struct bpf_vqueue), .max_entries = 1, }; -BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue); #define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20) SEC("spin_lock_demo") diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c index d86c281e957f..fcf2280bb60c 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c @@ -8,34 +8,50 @@ #define PERF_MAX_STACK_DEPTH 127 #endif -struct bpf_map_def SEC("maps") control_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} control_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 1, }; -struct bpf_map_def SEC("maps") stackid_hmap = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} stackid_hmap SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 16384, }; -struct bpf_map_def SEC("maps") 
stackmap = { +typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH]; + +struct { + __u32 type; + __u32 max_entries; + __u32 map_flags; + __u32 key_size; + __u32 value_size; +} stackmap SEC(".maps") = { .type = BPF_MAP_TYPE_STACK_TRACE, - .key_size = sizeof(__u32), - .value_size = sizeof(struct bpf_stack_build_id) - * PERF_MAX_STACK_DEPTH, .max_entries = 128, .map_flags = BPF_F_STACK_BUILD_ID, + .key_size = sizeof(__u32), + .value_size = sizeof(stack_trace_t), }; -struct bpf_map_def SEC("maps") stack_amap = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + /* there seems to be a bug in kernel not handling typedef properly */ + struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH]; +} stack_amap SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct bpf_stack_build_id) - * PERF_MAX_STACK_DEPTH, .max_entries = 128, }; diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c index af111af7ca1a..7ad09adbf648 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c @@ -8,31 +8,47 @@ #define PERF_MAX_STACK_DEPTH 127 #endif -struct bpf_map_def SEC("maps") control_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} control_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 1, }; -struct bpf_map_def SEC("maps") stackid_hmap = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} stackid_hmap SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 16384, }; -struct bpf_map_def SEC("maps") stackmap = { +typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH]; + +struct { + __u32 type; + __u32 max_entries; + __u32 key_size; + __u32 value_size; +} stackmap SEC(".maps") = { .type = BPF_MAP_TYPE_STACK_TRACE, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH, .max_entries = 16384, + .key_size = sizeof(__u32), + .value_size = sizeof(stack_trace_t), }; -struct bpf_map_def SEC("maps") stack_amap = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 (*value)[PERF_MAX_STACK_DEPTH]; +} stack_amap SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH, .max_entries = 16384, }; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c new file mode 100644 index 000000000000..608a06871572 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <stdint.h> +#include <string.h> + +#include <linux/stddef.h> +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ +#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */ +#define MAX_ULONG_STR_LEN 7 +#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) + +static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) +{ + volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string"; + unsigned char i; + char name[64]; + int ret; + + memset(name, 0, 
sizeof(name)); + ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0); + if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) + return 0; + +#pragma clang loop unroll(disable) + for (i = 0; i < sizeof(tcp_mem_name); ++i) + if (name[i] != tcp_mem_name[i]) + return 0; + + return 1; +} + +SEC("cgroup/sysctl") +int sysctl_tcp_mem(struct bpf_sysctl *ctx) +{ + unsigned long tcp_mem[TCP_MEM_LOOPS] = {}; + char value[MAX_VALUE_STR_LEN]; + unsigned char i, off = 0; + int ret; + + if (ctx->write) + return 0; + + if (!is_tcp_mem(ctx)) + return 0; + + ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN); + if (ret < 0 || ret >= MAX_VALUE_STR_LEN) + return 0; + +#pragma clang loop unroll(disable) + for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { + ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, + tcp_mem + i); + if (ret <= 0 || ret > MAX_ULONG_STR_LEN) + return 0; + off += ret & MAX_ULONG_STR_LEN; + } + + return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2]; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c new file mode 100644 index 000000000000..cb201cbe11e7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <stdint.h> +#include <string.h> + +#include <linux/stddef.h> +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ +#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */ +#define MAX_ULONG_STR_LEN 7 +#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) + +static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx) +{ + volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop"; + unsigned char i; + char name[64]; + int ret; + + memset(name, 0, sizeof(name)); + ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0); + if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) + return 0; + +#pragma clang loop unroll(disable) + for (i = 0; i < sizeof(tcp_mem_name); ++i) + if (name[i] != tcp_mem_name[i]) + return 0; + + return 1; +} + + +SEC("cgroup/sysctl") +int sysctl_tcp_mem(struct bpf_sysctl *ctx) +{ + unsigned long tcp_mem[TCP_MEM_LOOPS] = {}; + char value[MAX_VALUE_STR_LEN]; + unsigned char i, off = 0; + int ret; + + if (ctx->write) + return 0; + + if (!is_tcp_mem(ctx)) + return 0; + + ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN); + if (ret < 0 || ret >= MAX_VALUE_STR_LEN) + return 0; + +#pragma clang loop unroll(disable) + for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { + ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, + tcp_mem + i); + if (ret <= 0 || ret > MAX_ULONG_STR_LEN) + return 0; + off += ret & MAX_ULONG_STR_LEN; + } + + return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2]; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index a295cad805d7..5cbbff416998 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -8,7 +8,6 @@ #include <linux/bpf.h> #include "bpf_helpers.h" -#include "bpf_util.h" /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). 
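 * (Editor's note: 0xF = 15 characters is enough for the small tcp_mem
 * page counts this test feeds to bpf_strtoul; an arbitrary 64-bit
 * value in base 10 can take up to 20 digits.)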
*/ #define MAX_ULONG_STR_LEN 0xF @@ -16,6 +15,10 @@ /* Max supported length of sysctl value string (pow2). */ #define MAX_VALUE_STR_LEN 0x40 +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) { char tcp_mem_name[] = "net/ipv4/tcp_mem"; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index bee3bbecc0c4..df98f7e32832 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -148,10 +148,13 @@ struct tcp_estats_basic_event { struct tcp_estats_conn_id conn_id; }; -struct bpf_map_def SEC("maps") ev_record_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct tcp_estats_basic_event *value; +} ev_record_map SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__u32), - .value_size = sizeof(struct tcp_estats_basic_event), .max_entries = 1024, }; diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index c7c3240e0dd4..38e10c9fd996 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -14,17 +14,23 @@ #include "bpf_endian.h" #include "test_tcpbpf.h" -struct bpf_map_def SEC("maps") global_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct tcpbpf_globals *value; +} global_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct tcpbpf_globals), .max_entries = 4, }; -struct bpf_map_def SEC("maps") sockopt_results = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + int *value; +} sockopt_results SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(int), .max_entries = 2, }; diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c index ec6db6e64c41..d073d37d4e27 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c @@ -14,18 +14,26 @@ #include "bpf_endian.h" #include "test_tcpnotify.h" -struct bpf_map_def SEC("maps") global_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct tcpnotify_globals *value; +} global_map SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct tcpnotify_globals), .max_entries = 4, }; -struct bpf_map_def SEC("maps") perf_event_map = { +struct { + __u32 type; + __u32 max_entries; + __u32 key_size; + __u32 value_size; +} perf_event_map SEC(".maps") = { .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .max_entries = 2, .key_size = sizeof(int), .value_size = sizeof(__u32), - .max_entries = 2, }; int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index 5e7df8bb5b5d..ec3d2c1c8cf9 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -22,17 +22,23 @@ int _version SEC("version") = 1; -struct bpf_map_def SEC("maps") rxcnt = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u64 *value; +} rxcnt SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), .max_entries = 256, }; -struct bpf_map_def SEC("maps") vip2tnl = { +struct { + __u32 type; 
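+	/* (editor's note: in this BTF-defined form, the pointer-typed key
+	 * and value members below exist only to convey the key/value types
+	 * to libbpf; they replace the old .key_size/.value_size fields and
+	 * are never assigned at runtime) */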
+ __u32 max_entries; + struct vip *key; + struct iptnl_info *value; +} vip2tnl SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip), - .value_size = sizeof(struct iptnl_info), .max_entries = MAX_IPTNL_ENTRIES, }; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c new file mode 100644 index 000000000000..7fa4677df22e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <stddef.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/in.h> +#include <linux/udp.h> +#include <linux/tcp.h> +#include <linux/pkt_cls.h> +#include <sys/socket.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" +#include "test_iptunnel_common.h" + +int _version SEC("version") = 1; + +struct bpf_map_def SEC("maps") rxcnt = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = 256, +}; + +struct bpf_map_def SEC("maps") vip2tnl = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(struct vip), + .value_size = sizeof(struct iptnl_info), + .max_entries = MAX_IPTNL_ENTRIES, +}; + +static __always_inline void count_tx(__u32 protocol) +{ + __u64 *rxcnt_count; + + rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol); + if (rxcnt_count) + *rxcnt_count += 1; +} + +static __always_inline int get_dport(void *trans_data, void *data_end, + __u8 protocol) +{ + struct tcphdr *th; + struct udphdr *uh; + + switch (protocol) { + case IPPROTO_TCP: + th = (struct tcphdr *)trans_data; + if (th + 1 > data_end) + return -1; + return th->dest; + case IPPROTO_UDP: + uh = (struct udphdr *)trans_data; + if (uh + 1 > data_end) + return -1; + return uh->dest; + default: + return 0; + } +} + +static __always_inline void set_ethhdr(struct ethhdr *new_eth, + const struct ethhdr *old_eth, + const struct iptnl_info *tnl, + __be16 h_proto) +{ + memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source)); + memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest)); + new_eth->h_proto = h_proto; +} + +static __always_inline int handle_ipv4(struct xdp_md *xdp) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + struct iphdr *iph = data + sizeof(struct ethhdr); + __u16 *next_iph; + __u16 payload_len; + struct vip vip = {}; + int dport; + __u32 csum = 0; + int i; + + if (iph + 1 > data_end) + return XDP_DROP; + + dport = get_dport(iph + 1, data_end, iph->protocol); + if (dport == -1) + return XDP_DROP; + + vip.protocol = iph->protocol; + vip.family = AF_INET; + vip.daddr.v4 = iph->daddr; + vip.dport = dport; + payload_len = bpf_ntohs(iph->tot_len); + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v4-in-v4 */ + if (!tnl || tnl->family != AF_INET) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr))) + return XDP_DROP; + + data = (void *)(long)xdp->data; + data_end = (void *)(long)xdp->data_end; + + new_eth = data; + iph = data + sizeof(*new_eth); + old_eth = data + sizeof(*iph); + + if (new_eth + 1 > data_end || + old_eth + 1 > data_end || + iph + 1 > data_end) + return XDP_DROP; + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP)); + + iph->version = 4; + iph->ihl = sizeof(*iph) >> 2; + iph->frag_off = 
0; + iph->protocol = IPPROTO_IPIP; + iph->check = 0; + iph->tos = 0; + iph->tot_len = bpf_htons(payload_len + sizeof(*iph)); + iph->daddr = tnl->daddr.v4; + iph->saddr = tnl->saddr.v4; + iph->ttl = 8; + + next_iph = (__u16 *)iph; +#pragma clang loop unroll(disable) + for (i = 0; i < sizeof(*iph) >> 1; i++) + csum += *next_iph++; + + iph->check = ~((csum & 0xffff) + (csum >> 16)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +static __always_inline int handle_ipv6(struct xdp_md *xdp) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + struct ipv6hdr *ip6h = data + sizeof(struct ethhdr); + __u16 payload_len; + struct vip vip = {}; + int dport; + + if (ip6h + 1 > data_end) + return XDP_DROP; + + dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr); + if (dport == -1) + return XDP_DROP; + + vip.protocol = ip6h->nexthdr; + vip.family = AF_INET6; + memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr)); + vip.dport = dport; + payload_len = ip6h->payload_len; + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v6-in-v6 */ + if (!tnl || tnl->family != AF_INET6) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr))) + return XDP_DROP; + + data = (void *)(long)xdp->data; + data_end = (void *)(long)xdp->data_end; + + new_eth = data; + ip6h = data + sizeof(*new_eth); + old_eth = data + sizeof(*ip6h); + + if (new_eth + 1 > data_end || old_eth + 1 > data_end || + ip6h + 1 > data_end) + return XDP_DROP; + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6)); + + ip6h->version = 6; + ip6h->priority = 0; + memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl)); + ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h)); + ip6h->nexthdr = IPPROTO_IPV6; + ip6h->hop_limit = 8; + memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6)); + memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +SEC("xdp_tx_iptunnel") +int _xdp_tx_iptunnel(struct xdp_md *xdp) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + struct ethhdr *eth = data; + __u16 h_proto; + + if (eth + 1 > data_end) + return XDP_DROP; + + h_proto = eth->h_proto; + + if (h_proto == bpf_htons(ETH_P_IP)) + return handle_ipv4(xdp); + else if (h_proto == bpf_htons(ETH_P_IPV6)) + + return handle_ipv6(xdp); + else + return XDP_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 5e4aac74f9d0..d2eddb5553d1 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -15,13 +15,6 @@ #include <linux/udp.h> #include "bpf_helpers.h" -#define bpf_printk(fmt, ...) 
\ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - static __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> ((-shift) & 31)); @@ -170,52 +163,66 @@ struct lb_stats { __u64 v1; }; -struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = { +struct { + __u32 type; + __u32 max_entries; + struct vip_definition *key; + struct vip_meta *value; +} vip_map SEC(".maps") = { .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip_definition), - .value_size = sizeof(struct vip_meta), .max_entries = 512, - .map_flags = 0, }; -struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = { +struct { + __u32 type; + __u32 max_entries; + __u32 map_flags; + struct flow_key *key; + struct real_pos_lru *value; +} lru_cache SEC(".maps") = { .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(struct flow_key), - .value_size = sizeof(struct real_pos_lru), .max_entries = 300, .map_flags = 1U << 1, }; -struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + __u32 *value; +} ch_rings SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u32), .max_entries = 12 * 655, - .map_flags = 0, }; -struct bpf_map_def __attribute__ ((section("maps"), used)) reals = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct real_definition *value; +} reals SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct real_definition), .max_entries = 40, - .map_flags = 0, }; -struct bpf_map_def __attribute__ ((section("maps"), used)) stats = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct lb_stats *value; +} stats SEC(".maps") = { .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct lb_stats), .max_entries = 515, - .map_flags = 0, }; -struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = { +struct { + __u32 type; + __u32 max_entries; + __u32 *key; + struct ctl_value *value; +} ctl_array SEC(".maps") = { .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(struct ctl_value), .max_entries = 16, - .map_flags = 0, }; struct eth_hdr { diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c new file mode 100644 index 000000000000..87393e7c667c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdping_kern.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 
*/ + +#define KBUILD_MODNAME "foo" +#include <stddef.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/icmp.h> +#include <linux/in.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> + +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#include "xdping.h" + +struct bpf_map_def SEC("maps") ping_map = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(__u32), + .value_size = sizeof(struct pinginfo), + .max_entries = 256, +}; + +static __always_inline void swap_src_dst_mac(void *data) +{ + unsigned short *p = data; + unsigned short dst[3]; + + dst[0] = p[0]; + dst[1] = p[1]; + dst[2] = p[2]; + p[0] = p[3]; + p[1] = p[4]; + p[2] = p[5]; + p[3] = dst[0]; + p[4] = dst[1]; + p[5] = dst[2]; +} + +static __always_inline __u16 csum_fold_helper(__wsum sum) +{ + sum = (sum & 0xffff) + (sum >> 16); + return ~((sum & 0xffff) + (sum >> 16)); +} + +static __always_inline __u16 ipv4_csum(void *data_start, int data_size) +{ + __wsum sum; + + sum = bpf_csum_diff(0, 0, data_start, data_size, 0); + return csum_fold_helper(sum); +} + +#define ICMP_ECHO_LEN 64 + +static __always_inline int icmp_check(struct xdp_md *ctx, int type) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + struct icmphdr *icmph; + struct iphdr *iph; + + if (data + sizeof(*eth) + sizeof(*iph) + ICMP_ECHO_LEN > data_end) + return XDP_PASS; + + if (eth->h_proto != bpf_htons(ETH_P_IP)) + return XDP_PASS; + + iph = data + sizeof(*eth); + + if (iph->protocol != IPPROTO_ICMP) + return XDP_PASS; + + if (bpf_ntohs(iph->tot_len) - sizeof(*iph) != ICMP_ECHO_LEN) + return XDP_PASS; + + icmph = data + sizeof(*eth) + sizeof(*iph); + + if (icmph->type != type) + return XDP_PASS; + + return XDP_TX; +} + +SEC("xdpclient") +int xdping_client(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct pinginfo *pinginfo = NULL; + struct ethhdr *eth = data; + struct icmphdr *icmph; + struct iphdr *iph; + __u64 recvtime; + __be32 raddr; + __be16 seq; + int ret; + __u8 i; + + ret = icmp_check(ctx, ICMP_ECHOREPLY); + + if (ret != XDP_TX) + return ret; + + iph = data + sizeof(*eth); + icmph = data + sizeof(*eth) + sizeof(*iph); + raddr = iph->saddr; + + /* Record time reply received. */ + recvtime = bpf_ktime_get_ns(); + pinginfo = bpf_map_lookup_elem(&ping_map, &raddr); + if (!pinginfo || pinginfo->seq != icmph->un.echo.sequence) + return XDP_PASS; + + if (pinginfo->start) { +#pragma clang loop unroll(full) + for (i = 0; i < XDPING_MAX_COUNT; i++) { + if (pinginfo->times[i] == 0) + break; + } + /* verifier is fussy here... */ + if (i < XDPING_MAX_COUNT) { + pinginfo->times[i] = recvtime - + pinginfo->start; + pinginfo->start = 0; + i++; + } + /* No more space for values? */ + if (i == pinginfo->count || i == XDPING_MAX_COUNT) + return XDP_PASS; + } + + /* Now convert reply back into echo request. 
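+	 * (Editor's note: the steps below swap source/destination MACs and
+	 * IP addresses, bump the echo sequence number, and then recompute
+	 * the ICMP checksum over the ICMP_ECHO_LEN-byte message, since the
+	 * type and sequence fields changed.)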
*/ + swap_src_dst_mac(data); + iph->saddr = iph->daddr; + iph->daddr = raddr; + icmph->type = ICMP_ECHO; + seq = bpf_htons(bpf_ntohs(icmph->un.echo.sequence) + 1); + icmph->un.echo.sequence = seq; + icmph->checksum = 0; + icmph->checksum = ipv4_csum(icmph, ICMP_ECHO_LEN); + + pinginfo->seq = seq; + pinginfo->start = bpf_ktime_get_ns(); + + return XDP_TX; +} + +SEC("xdpserver") +int xdping_server(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + struct icmphdr *icmph; + struct iphdr *iph; + __be32 raddr; + int ret; + + ret = icmp_check(ctx, ICMP_ECHO); + + if (ret != XDP_TX) + return ret; + + iph = data + sizeof(*eth); + icmph = data + sizeof(*eth) + sizeof(*iph); + raddr = iph->saddr; + + /* Now convert request into echo reply. */ + swap_src_dst_mac(data); + iph->saddr = iph->daddr; + iph->daddr = raddr; + icmph->type = ICMP_ECHOREPLY; + icmph->checksum = 0; + icmph->checksum = ipv4_csum(icmph, ICMP_ECHO_LEN); + + return XDP_TX; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 42c1ce988945..8351cb5f4a20 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -4016,71 +4016,18 @@ struct btf_file_test { }; static struct btf_file_test file_tests[] = { -{ - .file = "test_btf_haskv.o", -}, -{ - .file = "test_btf_nokv.o", - .btf_kv_notfound = true, -}, + { .file = "test_btf_haskv.o", }, + { .file = "test_btf_newkv.o", }, + { .file = "test_btf_nokv.o", .btf_kv_notfound = true, }, }; -static int file_has_btf_elf(const char *fn, bool *has_btf_ext) -{ - Elf_Scn *scn = NULL; - GElf_Ehdr ehdr; - int ret = 0; - int elf_fd; - Elf *elf; - - if (CHECK(elf_version(EV_CURRENT) == EV_NONE, - "elf_version(EV_CURRENT) == EV_NONE")) - return -1; - - elf_fd = open(fn, O_RDONLY); - if (CHECK(elf_fd == -1, "open(%s): errno:%d", fn, errno)) - return -1; - - elf = elf_begin(elf_fd, ELF_C_READ, NULL); - if (CHECK(!elf, "elf_begin(%s): %s", fn, elf_errmsg(elf_errno()))) { - ret = -1; - goto done; - } - - if (CHECK(!gelf_getehdr(elf, &ehdr), "!gelf_getehdr(%s)", fn)) { - ret = -1; - goto done; - } - - while ((scn = elf_nextscn(elf, scn))) { - const char *sh_name; - GElf_Shdr sh; - - if (CHECK(gelf_getshdr(scn, &sh) != &sh, - "file:%s gelf_getshdr != &sh", fn)) { - ret = -1; - goto done; - } - - sh_name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); - if (!strcmp(sh_name, BTF_ELF_SEC)) - ret = 1; - if (!strcmp(sh_name, BTF_EXT_ELF_SEC)) - *has_btf_ext = true; - } - -done: - close(elf_fd); - elf_end(elf); - return ret; -} - static int do_test_file(unsigned int test_num) { const struct btf_file_test *test = &file_tests[test_num - 1]; const char *expected_fnames[] = {"_dummy_tracepoint", "test_long_fname_1", "test_long_fname_2"}; + struct btf_ext *btf_ext = NULL; struct bpf_prog_info info = {}; struct bpf_object *obj = NULL; struct bpf_func_info *finfo; @@ -4095,15 +4042,19 @@ static int do_test_file(unsigned int test_num) fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num, test->file); - err = file_has_btf_elf(test->file, &has_btf_ext); - if (err == -1) - return err; - - if (err == 0) { - fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC); - skip_cnt++; - return 0; + btf = btf__parse_elf(test->file, &btf_ext); + if (IS_ERR(btf)) { + if (PTR_ERR(btf) == -ENOENT) { + fprintf(stderr, "SKIP. 
No ELF %s found", BTF_ELF_SEC); + skip_cnt++; + return 0; + } + return PTR_ERR(btf); } + btf__free(btf); + + has_btf_ext = btf_ext != NULL; + btf_ext__free(btf_ext); obj = bpf_object__open(test->file); if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj))) diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c new file mode 100644 index 000000000000..8f850823d35f --- /dev/null +++ b/tools/testing/selftests/bpf/test_btf_dump.c @@ -0,0 +1,143 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <errno.h> +#include <linux/err.h> +#include <btf.h> + +#define CHECK(condition, format...) ({ \ + int __ret = !!(condition); \ + if (__ret) { \ + fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \ + fprintf(stderr, format); \ + } \ + __ret; \ +}) + +void btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vfprintf(ctx, fmt, args); +} + +struct btf_dump_test_case { + const char *name; + struct btf_dump_opts opts; +} btf_dump_test_cases[] = { + {.name = "btf_dump_test_case_syntax", .opts = {}}, + {.name = "btf_dump_test_case_ordering", .opts = {}}, + {.name = "btf_dump_test_case_padding", .opts = {}}, + {.name = "btf_dump_test_case_packing", .opts = {}}, + {.name = "btf_dump_test_case_bitfields", .opts = {}}, + {.name = "btf_dump_test_case_multidim", .opts = {}}, + {.name = "btf_dump_test_case_namespacing", .opts = {}}, +}; + +static int btf_dump_all_types(const struct btf *btf, + const struct btf_dump_opts *opts) +{ + size_t type_cnt = btf__get_nr_types(btf); + struct btf_dump *d; + int err = 0, id; + + d = btf_dump__new(btf, NULL, opts, btf_dump_printf); + if (IS_ERR(d)) + return PTR_ERR(d); + + for (id = 1; id <= type_cnt; id++) { + err = btf_dump__dump_type(d, id); + if (err) + goto done; + } + +done: + btf_dump__free(d); + return err; +} + +int test_btf_dump_case(int n, struct btf_dump_test_case *test_case) +{ + char test_file[256], out_file[256], diff_cmd[1024]; + struct btf *btf = NULL; + int err = 0, fd = -1; + FILE *f = NULL; + + fprintf(stderr, "Test case #%d (%s): ", n, test_case->name); + + snprintf(test_file, sizeof(test_file), "%s.o", test_case->name); + + btf = btf__parse_elf(test_file, NULL); + if (CHECK(IS_ERR(btf), + "failed to load test BTF: %ld\n", PTR_ERR(btf))) { + err = -PTR_ERR(btf); + btf = NULL; + goto done; + } + + snprintf(out_file, sizeof(out_file), + "/tmp/%s.output.XXXXXX", test_case->name); + fd = mkstemp(out_file); + if (CHECK(fd < 0, "failed to create temp output file: %d\n", fd)) { + err = fd; + goto done; + } + f = fdopen(fd, "w"); + if (CHECK(f == NULL, "failed to open temp output file: %s(%d)\n", + strerror(errno), errno)) { + close(fd); + goto done; + } + + test_case->opts.ctx = f; + err = btf_dump_all_types(btf, &test_case->opts); + fclose(f); + close(fd); + if (CHECK(err, "failure during C dumping: %d\n", err)) { + goto done; + } + + snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); + /* + * Diff test output and expected test output, contained between + * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. + * For expected output lines, everything before '*' is stripped out. + * Also lines containing comment start and comment end markers are + * ignored. 
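
To make that convention concrete, a hypothetical test-case source obeying it could look like the snippet below; the struct is invented for illustration, the real inputs are the progs/btf_dump_test_case_*.c files listed above:

/* START-EXPECTED-OUTPUT */
/*
 *struct sample {
 *	int a;
 *	char b;
 *};
 *
 */
/* END-EXPECTED-OUTPUT */

struct sample {
	int a;
	char b;
};

int f(struct sample *s)		/* keeps the type alive in BTF */
{
	return s->a;
}
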
+ */ + snprintf(diff_cmd, sizeof(diff_cmd), + "awk '/START-EXPECTED-OUTPUT/{out=1;next} " + "/END-EXPECTED-OUTPUT/{out=0} " + "/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */ + "out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'", + test_file, out_file); + err = system(diff_cmd); + if (CHECK(err, + "differing test output, output=%s, err=%d, diff cmd:\n%s\n", + out_file, err, diff_cmd)) + goto done; + + remove(out_file); + fprintf(stderr, "OK\n"); + +done: + btf__free(btf); + return err; +} + +int main() { + int test_case_cnt, i, err, failed = 0; + + test_case_cnt = sizeof(btf_dump_test_cases) / + sizeof(btf_dump_test_cases[0]); + + for (i = 0; i < test_case_cnt; i++) { + err = test_btf_dump_case(i, &btf_dump_test_cases[i]); + if (err) + failed++; + } + + fprintf(stderr, "%d tests succeeded, %d tests failed.\n", + test_case_cnt - failed, failed); + + return failed; +} diff --git a/tools/testing/selftests/bpf/test_cgroup_attach.c b/tools/testing/selftests/bpf/test_cgroup_attach.c new file mode 100644 index 000000000000..7671909ee1cb --- /dev/null +++ b/tools/testing/selftests/bpf/test_cgroup_attach.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* eBPF example program: + * + * - Creates arraymap in kernel with 4 bytes keys and 8 byte values + * + * - Loads eBPF program + * + * The eBPF program accesses the map passed in to store two pieces of + * information. The number of invocations of the program, which maps + * to the number of packets received, is stored to key 0. Key 1 is + * incremented on each iteration by the number of bytes stored in + * the skb. The program also stores the number of received bytes + * in the cgroup storage. + * + * - Attaches the new program to a cgroup using BPF_PROG_ATTACH + * + * - Every second, reads map[0] and map[1] to see how many bytes and + * packets were seen on any socket of tasks in the given cgroup. + */ + +#define _GNU_SOURCE + +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <sys/resource.h> +#include <sys/time.h> +#include <unistd.h> +#include <linux/filter.h> + +#include <linux/bpf.h> +#include <bpf/bpf.h> + +#include "bpf_util.h" +#include "bpf_rlimit.h" +#include "cgroup_helpers.h" + +#define FOO "/foo" +#define BAR "/foo/bar/" +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +#ifdef DEBUG +#define debug(args...) printf(args) +#else +#define debug(args...) +#endif + +static int prog_load(int verdict) +{ + int ret; + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + + if (ret < 0) { + log_err("Loading program"); + printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); + return 0; + } + return ret; +} + +static int test_foo_bar(void) +{ + int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0; + + allow_prog = prog_load(1); + if (!allow_prog) + goto err; + + drop_prog = prog_load(0); + if (!drop_prog) + goto err; + + if (setup_cgroup_environment()) + goto err; + + /* Create cgroup /foo, get fd, and join it */ + foo = create_and_get_cgroup(FOO); + if (foo < 0) + goto err; + + if (join_cgroup(FOO)) + goto err; + + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to /foo"); + goto err; + } + + debug("Attached DROP prog. 
This ping in cgroup /foo should fail...\n"); + assert(system(PING_CMD) != 0); + + /* Create cgroup /foo/bar, get fd, and join it */ + bar = create_and_get_cgroup(BAR); + if (bar < 0) + goto err; + + if (join_cgroup(BAR)) + goto err; + + debug("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); + assert(system(PING_CMD) != 0); + + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to /foo/bar"); + goto err; + } + + debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); + assert(system(PING_CMD) == 0); + + if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching program from /foo/bar"); + goto err; + } + + debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n" + "This ping in cgroup /foo/bar should fail...\n"); + assert(system(PING_CMD) != 0); + + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to /foo/bar"); + goto err; + } + + if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching program from /foo"); + goto err; + } + + debug("Attached PASS from /foo/bar and detached DROP from /foo.\n" + "This ping in cgroup /foo/bar should pass...\n"); + assert(system(PING_CMD) == 0); + + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { + errno = 0; + log_err("Unexpected success attaching prog to /foo/bar"); + goto err; + } + + if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching program from /foo/bar"); + goto err; + } + + if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { + errno = 0; + log_err("Unexpected success in double detach from /foo"); + goto err; + } + + if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { + log_err("Attaching non-overridable prog to /foo"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { + errno = 0; + log_err("Unexpected success attaching non-overridable prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + errno = 0; + log_err("Unexpected success attaching overridable prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + errno = 0; + log_err("Unexpected success attaching overridable prog to /foo"); + goto err; + } + + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { + log_err("Attaching different non-overridable prog to /foo"); + goto err; + } + + goto out; + +err: + rc = 1; + +out: + close(foo); + close(bar); + cleanup_cgroup_environment(); + if (!rc) + printf("#override:PASS\n"); + else + printf("#override:FAIL\n"); + return rc; +} + +static int map_fd = -1; + +static int prog_load_cnt(int verdict, int val) +{ + int cgroup_storage_fd, percpu_cgroup_storage_fd; + + if (map_fd < 0) + map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + percpu_cgroup_storage_fd = bpf_create_map( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + sizeof(struct 
bpf_cgroup_storage_key), 8, 0, 0); + if (percpu_cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + struct bpf_insn prog[] = { + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, val), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), + + BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + int ret; + + ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + + if (ret < 0) { + log_err("Loading program"); + printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); + return 0; + } + close(cgroup_storage_fd); + return ret; +} + + +static int test_multiprog(void) +{ + __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; + int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; + int drop_prog, allow_prog[6] = {}, rc = 0; + unsigned long long value; + int i = 0; + + for (i = 0; i < 6; i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (!allow_prog[i]) + goto err; + } + drop_prog = prog_load_cnt(0, 1); + if (!drop_prog) + goto err; + + if (setup_cgroup_environment()) + goto err; + + cg1 = create_and_get_cgroup("/cg1"); + if (cg1 < 0) + goto err; + cg2 = create_and_get_cgroup("/cg1/cg2"); + if (cg2 < 0) + goto err; + cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); + if (cg3 < 0) + goto err; + cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); + if (cg4 < 0) + goto err; + cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); + if (cg5 < 0) + goto err; + + if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) + goto err; + + if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Attaching prog to cg1"); + goto err; + } + if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Unexpected success attaching the same prog to cg1"); + goto err; + } + if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Attaching prog2 to cg1"); + goto err; + } + if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to cg2"); + goto err; + } + if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Attaching prog to cg3"); + goto err; + } + if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE)) { + log_err("Attaching prog to cg4"); + goto err; + } + if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) { + 
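
For readers who do not speak raw instruction arrays, prog_load_cnt()'s program corresponds roughly to the restricted C below. This is an illustrative sketch only: the map names and the VAL/VERDICT macros stand in for the function's val and verdict arguments, and the test assembles instructions by hand precisely so it can patch freshly created map fds into BPF_LD_MAP_FD:

#define VAL	1	/* stand-in for prog_load_cnt(, val) */
#define VERDICT	1	/* stand-in for prog_load_cnt(verdict, ) */

SEC("cgroup_skb/egress")
int egress_cnt(struct __sk_buff *skb)
{
	__u32 key = 0;
	__u64 *cnt;
	__u32 *shared, *percpu;

	cnt = bpf_map_lookup_elem(&byte_cnt_map, &key);
	if (cnt)
		__sync_fetch_and_add(cnt, VAL);		/* 64-bit xadd */

	shared = bpf_get_local_storage(&cgroup_storage, 0);
	__sync_fetch_and_add(shared, VAL);		/* 32-bit xadd */

	percpu = bpf_get_local_storage(&percpu_cgroup_storage, 0);
	*percpu += 1;		/* plain load/add/store; per-cpu, no atomic */

	return VERDICT;		/* 1 = allow, 0 = drop */
}
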
log_err("Attaching prog to cg5"); + goto err; + } + assert(system(PING_CMD) == 0); + assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); + assert(value == 1 + 2 + 8 + 32); + + /* query the number of effective progs in cg5 */ + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, + NULL, NULL, &prog_cnt) == 0); + assert(prog_cnt == 4); + /* retrieve prog_ids of effective progs in cg5 */ + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, + &attach_flags, prog_ids, &prog_cnt) == 0); + assert(prog_cnt == 4); + assert(attach_flags == 0); + saved_prog_id = prog_ids[0]; + /* check enospc handling */ + prog_ids[0] = 0; + prog_cnt = 2; + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, + &attach_flags, prog_ids, &prog_cnt) == -1 && + errno == ENOSPC); + assert(prog_cnt == 4); + /* check that prog_ids are returned even when buffer is too small */ + assert(prog_ids[0] == saved_prog_id); + /* retrieve prog_id of single attached prog in cg5 */ + prog_ids[0] = 0; + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, + NULL, prog_ids, &prog_cnt) == 0); + assert(prog_cnt == 1); + assert(prog_ids[0] == saved_prog_id); + + /* detach bottom program and ping again */ + if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching prog from cg5"); + goto err; + } + value = 0; + assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); + assert(system(PING_CMD) == 0); + assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); + assert(value == 1 + 2 + 8 + 16); + + /* detach 3rd from bottom program and ping again */ + errno = 0; + if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) { + log_err("Unexpected success on detach from cg3"); + goto err; + } + if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching from cg3"); + goto err; + } + value = 0; + assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); + assert(system(PING_CMD) == 0); + assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); + assert(value == 1 + 2 + 16); + + /* detach 2nd from bottom program and ping again */ + if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching prog from cg4"); + goto err; + } + value = 0; + assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); + assert(system(PING_CMD) == 0); + assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); + assert(value == 1 + 2 + 4); + + prog_cnt = 4; + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, + &attach_flags, prog_ids, &prog_cnt) == 0); + assert(prog_cnt == 3); + assert(attach_flags == 0); + assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, + NULL, prog_ids, &prog_cnt) == 0); + assert(prog_cnt == 0); + goto out; +err: + rc = 1; + +out: + for (i = 0; i < 6; i++) + if (allow_prog[i] > 0) + close(allow_prog[i]); + close(cg1); + close(cg2); + close(cg3); + close(cg4); + close(cg5); + cleanup_cgroup_environment(); + if (!rc) + printf("#multi:PASS\n"); + else + printf("#multi:FAIL\n"); + return rc; +} + +static int test_autodetach(void) +{ + __u32 prog_cnt = 4, attach_flags; + int allow_prog[2] = {0}; + __u32 prog_ids[2] = {0}; + int cg = 0, i, rc = -1; + void *ptr = NULL; + int attempts; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (!allow_prog[i]) + goto err; + } + + if (setup_cgroup_environment()) + goto err; + + /* create a cgroup, attach two programs and remember their ids */ + cg = create_and_get_cgroup("/cg_autodetach"); + if (cg < 0) + goto 
err; + + if (join_cgroup("/cg_autodetach")) + goto err; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI)) { + log_err("Attaching prog[%d] to cg:egress", i); + goto err; + } + } + + /* make sure that programs are attached and run some traffic */ + assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, + prog_ids, &prog_cnt) == 0); + assert(system(PING_CMD) == 0); + + /* allocate some memory (4Mb) to pin the original cgroup */ + ptr = malloc(4 * (1 << 20)); + if (!ptr) + goto err; + + /* close programs and cgroup fd */ + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + close(allow_prog[i]); + allow_prog[i] = 0; + } + + close(cg); + cg = 0; + + /* leave the cgroup and remove it. don't detach programs */ + cleanup_cgroup_environment(); + + /* wait for the asynchronous auto-detachment. + * wait for no more than 5 sec and give up. + */ + for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { + for (attempts = 5; attempts >= 0; attempts--) { + int fd = bpf_prog_get_fd_by_id(prog_ids[i]); + + if (fd < 0) + break; + + /* don't leave the fd open */ + close(fd); + + if (!attempts) + goto err; + + sleep(1); + } + } + + rc = 0; +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] > 0) + close(allow_prog[i]); + if (cg) + close(cg); + free(ptr); + cleanup_cgroup_environment(); + if (!rc) + printf("#autodetach:PASS\n"); + else + printf("#autodetach:FAIL\n"); + return rc; +} + +int main(void) +{ + int (*tests[])(void) = { + test_foo_bar, + test_multiprog, + test_autodetach, + }; + int errors = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) + if (tests[i]()) + errors++; + + if (errors) + printf("test_cgroup_attach:FAIL\n"); + else + printf("test_cgroup_attach:PASS\n"); + + return errors ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tools/testing/selftests/bpf/test_hashmap.c b/tools/testing/selftests/bpf/test_hashmap.c new file mode 100644 index 000000000000..b64094c981e3 --- /dev/null +++ b/tools/testing/selftests/bpf/test_hashmap.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Tests for libbpf's hashmap. + * + * Copyright (c) 2019 Facebook + */ +#include <stdio.h> +#include <errno.h> +#include <linux/err.h> +#include "hashmap.h" + +#define CHECK(condition, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \ + fprintf(stderr, format); \ + } \ + __ret; \ +}) + +size_t hash_fn(const void *k, void *ctx) +{ + return (long)k; +} + +bool equal_fn(const void *a, const void *b, void *ctx) +{ + return (long)a == (long)b; +} + +static inline size_t next_pow_2(size_t n) +{ + size_t r = 1; + + while (r < n) + r <<= 1; + return r; +} + +static inline size_t exp_cap(size_t sz) +{ + size_t r = next_pow_2(sz); + + if (sz * 4 / 3 > r) + r <<= 1; + return r; +} + +#define ELEM_CNT 62 + +int test_hashmap_generic(void) +{ + struct hashmap_entry *entry, *tmp; + int err, bkt, found_cnt, i; + long long found_msk; + struct hashmap *map; + + fprintf(stderr, "%s: ", __func__); + + map = hashmap__new(hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(1024 + i); + + err = hashmap__update(map, k, v, &oldk, &oldv); + if (CHECK(err != -ENOENT, "unexpected result: %d\n", err)) + return 1; + + if (i % 2) { + err = hashmap__add(map, k, v); + } else { + err = hashmap__set(map, k, v, &oldk, &oldv); + if (CHECK(oldk != NULL || oldv != NULL, + "unexpected k/v: %p=%p\n", oldk, oldv)) + return 1; + } + + if (CHECK(err, "failed to add k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + return 1; + + if (CHECK(!hashmap__find(map, k, &oldv), + "failed to find key %ld\n", (long)k)) + return 1; + if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv)) + return 1; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 1024, "invalid k/v pair: %ld = %ld\n", k, v)) + return 1; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys iterated: %llx\n", found_msk)) + return 1; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(256 + i); + + err = hashmap__add(map, k, v); + if (CHECK(err != -EEXIST, "unexpected add result: %d\n", err)) + return 1; + + if (i % 2) + err = hashmap__update(map, k, v, &oldk, &oldv); + else + err = hashmap__set(map, k, v, &oldk, &oldv); + + if (CHECK(err, "failed to update k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + return 1; + if (CHECK(!hashmap__find(map, k, &oldv), + "failed to find key %ld\n", (long)k)) + return 1; + if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv)) + return 1; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, + "invalid updated map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + found_msk = 0; + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 256, + "invalid updated k/v pair: %ld = %ld\n", k, v)) + return 1; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys iterated after update: %llx\n", found_msk)) + return 1; + + found_cnt = 0; + hashmap__for_each_key_entry(map, entry, 
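
The capacity checks in this test hinge on exp_cap() mirroring libbpf's growth policy: the hashmap roughly doubles once the load factor would exceed 3/4. A standalone sanity check of that arithmetic for ELEM_CNT == 62 (a sketch, runnable on its own):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t sz = 62, cap = 1;

	while (cap < sz)		/* next_pow_2() */
		cap <<= 1;
	if (sz * 4 / 3 > cap)		/* load factor above 3/4? */
		cap <<= 1;
	/* 62 -> 64, and 62 * 4 / 3 = 82 > 64, so expect 128 */
	printf("expected capacity for %zu entries: %zu\n", sz, cap);
	return 0;
}
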
(void *)0) { + found_cnt++; + } + if (CHECK(!found_cnt, "didn't find any entries for key 0\n")) + return 1; + + found_msk = 0; + found_cnt = 0; + hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + if (CHECK(oldk != k || oldv != v, + "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + return 1; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), + "unexpectedly deleted k/v %ld = %ld\n", + (long)oldk, (long)oldv)) + return 1; + } + + if (CHECK(!found_cnt || !found_msk, + "didn't delete any key entries\n")) + return 1; + if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + return 1; + + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + if (CHECK(oldk != k || oldv != v, + "invalid old k/v: expect %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + return 1; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), + "unexpectedly deleted k/v %ld = %ld\n", + (long)k, (long)v)) + return 1; + } + + if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1, + "not all keys were deleted: found_cnt:%d, found_msk:%llx\n", + found_cnt, found_msk)) + return 1; + if (CHECK(hashmap__size(map) != 0, + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + return 1; + + found_cnt = 0; + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + return 1; + } + + hashmap__free(map); + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + return 1; + } + + fprintf(stderr, "OK\n"); + return 0; +} + +size_t collision_hash_fn(const void *k, void *ctx) +{ + return 0; +} + +int test_hashmap_multimap(void) +{ + void *k1 = (void *)0, *k2 = (void *)1; + struct hashmap_entry *entry; + struct hashmap *map; + long found_msk; + int err, bkt; + + fprintf(stderr, "%s: ", __func__); + + /* force collisions */ + map = hashmap__new(collision_hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + + /* set up multimap: + * [0] -> 1, 2, 4; + * [1] -> 8, 16, 32; + */ + err = hashmap__append(map, k1, (void *)1); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k1, (void *)2); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k1, (void *)4); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + + err = hashmap__append(map, k2, (void *)8); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k2, (void *)16); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + err = hashmap__append(map, k2, 
(void *)32); + if (CHECK(err, "failed to add k/v: %d\n", err)) + return 1; + + if (CHECK(hashmap__size(map) != 6, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + + /* verify global iteration still works and sees all values */ + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 << 6) - 1, + "not all keys iterated: %lx\n", found_msk)) + return 1; + + /* iterate values for key 1 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k1) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 | 2 | 4), + "invalid k1 values: %lx\n", found_msk)) + return 1; + + /* iterate values for key 2 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k2) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (8 | 16 | 32), + "invalid k2 values: %lx\n", found_msk)) + return 1; + + fprintf(stderr, "OK\n"); + return 0; +} + +int test_hashmap_empty() +{ + struct hashmap_entry *entry; + int bkt; + struct hashmap *map; + void *k = (void *)0; + + fprintf(stderr, "%s: ", __func__); + + /* empty map: no entries are ever added */ + map = hashmap__new(hash_fn, equal_fn, NULL); + if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) + return 1; + + if (CHECK(hashmap__size(map) != 0, + "invalid map size: %zu\n", hashmap__size(map))) + return 1; + if (CHECK(hashmap__capacity(map) != 0, + "invalid map capacity: %zu\n", hashmap__capacity(map))) + return 1; + if (CHECK(hashmap__find(map, k, NULL), "unexpected find\n")) + return 1; + if (CHECK(hashmap__delete(map, k, NULL, NULL), "unexpected delete\n")) + return 1; + + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "unexpected iterated entry\n"); + return 1; + } + hashmap__for_each_key_entry(map, entry, k) { + CHECK(false, "unexpected key entry\n"); + return 1; + } + + fprintf(stderr, "OK\n"); + return 0; +} + +int main(int argc, char **argv) +{ + bool failed = false; + + if (test_hashmap_generic()) + failed = true; + if (test_hashmap_multimap()) + failed = true; + if (test_hashmap_empty()) + failed = true; + + return failed; +} diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c index 75646d9b34aa..7566c13eb51a 100644 --- a/tools/testing/selftests/bpf/test_select_reuseport.c +++ b/tools/testing/selftests/bpf/test_select_reuseport.c @@ -523,6 +523,58 @@ static void test_pass_on_err(int type, sa_family_t family) printf("OK\n"); } +static void test_detach_bpf(int type, sa_family_t family) +{ +#ifdef SO_DETACH_REUSEPORT_BPF + __u32 nr_run_before = 0, nr_run_after = 0, tmp, i; + struct epoll_event ev; + int cli_fd, err, nev; + struct cmd cmd = {}; + int optvalue = 0; + + printf("%s: ", __func__); + err = setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, + &optvalue, sizeof(optvalue)); + CHECK(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); + + err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, + &optvalue, sizeof(optvalue)); + CHECK(err == 0 || errno != ENOENT, "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &tmp); + CHECK(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + nr_run_before += tmp; + } + + cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS); + nev = epoll_wait(epfd, &ev, 1, 5); + CHECK(nev <= 0, "nev <= 0", + "nev:%d expected:1 type:%d family:%d data:(0, 
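
SO_DETACH_REUSEPORT_BPF, exercised by test_detach_bpf() here, is compiled in only when the system headers define it, hence the #ifdef. A minimal standalone use looks like this (a sketch; to the best of my knowledge the option value is not interpreted, and errno == ENOENT means no program was attached):

#include <sys/socket.h>

static int detach_reuseport_bpf(int sock_fd)
{
	int dummy = 0;	/* value appears to be ignored by the kernel */

	return setsockopt(sock_fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
			  &dummy, sizeof(dummy));
}
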
0)\n", + nev, type, family); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &tmp); + CHECK(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + nr_run_after += tmp; + } + + CHECK(nr_run_before != nr_run_after, + "nr_run_before != nr_run_after", + "nr_run_before:%u nr_run_after:%u\n", + nr_run_before, nr_run_after); + + printf("OK\n"); + close(cli_fd); +#else + printf("%s: SKIP\n", __func__); +#endif +} + static void prepare_sk_fds(int type, sa_family_t family, bool inany) { const int first = REUSEPORT_ARRAY_SIZE - 1; @@ -664,6 +716,8 @@ static void test_all(void) test_pass(type, family); test_syncookie(type, family); test_pass_on_err(type, family); + /* Must be the last test */ + test_detach_bpf(type, family); cleanup_per_test(); printf("\n"); diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 4ecde2392327..61fd95b89af8 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -836,6 +836,7 @@ static int load_path(const struct sock_addr_test *test, const char *path) attr.file = path; attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; attr.expected_attach_type = test->expected_attach_type; + attr.prog_flags = BPF_F_TEST_RND_HI32; if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { if (test->expected_result != LOAD_REJECT) diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c index e089477fa0a3..f0fc103261a4 100644 --- a/tools/testing/selftests/bpf/test_sock_fields.c +++ b/tools/testing/selftests/bpf/test_sock_fields.c @@ -414,6 +414,7 @@ int main(int argc, char **argv) struct bpf_prog_load_attr attr = { .file = "test_sock_fields_kern.o", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .prog_flags = BPF_F_TEST_RND_HI32, }; int cgroup_fd, egress_fd, ingress_fd, err; struct bpf_program *ingress_prog; diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index e51d63786ff8..15653b0e26eb 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c @@ -18,6 +18,11 @@ #define CG_PATH "/foo" #define SOCKET_COOKIE_PROG "./socket_cookie_prog.o" +struct socket_cookie { + __u64 cookie_key; + __u32 cookie_value; +}; + static int start_server(void) { struct sockaddr_in6 addr; @@ -89,8 +94,7 @@ static int validate_map(struct bpf_map *map, int client_fd) __u32 cookie_expected_value; struct sockaddr_in6 addr; socklen_t len = sizeof(addr); - __u32 cookie_value; - __u64 cookie_key; + struct socket_cookie val; int err = 0; int map_fd; @@ -101,17 +105,7 @@ static int validate_map(struct bpf_map *map, int client_fd) map_fd = bpf_map__fd(map); - err = bpf_map_get_next_key(map_fd, NULL, &cookie_key); - if (err) { - log_err("Can't get cookie key from map"); - goto out; - } - - err = bpf_map_lookup_elem(map_fd, &cookie_key, &cookie_value); - if (err) { - log_err("Can't get cookie value from map"); - goto out; - } + err = bpf_map_lookup_elem(map_fd, &client_fd, &val); err = getsockname(client_fd, (struct sockaddr *)&addr, &len); if (err) { @@ -120,8 +114,8 @@ static int validate_map(struct bpf_map *map, int client_fd) } cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF; - if (cookie_value != cookie_expected_value) { - log_err("Unexpected value in map: %x != %x", cookie_value, + if (val.cookie_value != cookie_expected_value) { + log_err("Unexpected value in map: %x != %x", 
val.cookie_value, cookie_expected_value); goto err; } @@ -148,6 +142,7 @@ static int run_test(int cgfd) memset(&attr, 0, sizeof(attr)); attr.file = SOCKET_COOKIE_PROG; attr.prog_type = BPF_PROG_TYPE_UNSPEC; + attr.prog_flags = BPF_F_TEST_RND_HI32; err = bpf_prog_load_xattr(&attr, &pobj, &prog_fd); if (err) { diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.h b/tools/testing/selftests/bpf/test_sockmap_kern.h index e7639f66a941..4e7d3da21357 100644 --- a/tools/testing/selftests/bpf/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/test_sockmap_kern.h @@ -28,13 +28,6 @@ * are established and verdicts are decided. */ -#define bpf_printk(fmt, ...) \ -({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ -}) - struct bpf_map_def SEC("maps") sock_map = { .type = TEST_MAP_TYPE, .key_size = sizeof(int), diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c new file mode 100644 index 000000000000..84e81a89e2f9 --- /dev/null +++ b/tools/testing/selftests/bpf/test_stub.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include <string.h> + +int bpf_prog_test_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_prog_load_attr attr; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = type; + attr.expected_attach_type = 0; + attr.prog_flags = BPF_F_TEST_RND_HI32; + + return bpf_prog_load_xattr(&attr, pobj, prog_fd); +} + +int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) +{ + struct bpf_load_program_attr load_attr; + + memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); + load_attr.prog_type = type; + load_attr.expected_attach_type = 0; + load_attr.name = NULL; + load_attr.insns = insns; + load_attr.insns_cnt = insns_cnt; + load_attr.license = license; + load_attr.kern_version = kern_version; + load_attr.prog_flags = BPF_F_TEST_RND_HI32; + + return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); +} diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index 546aee3e9fb4..bd12ec97a44d 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -696,30 +696,57 @@ check_err() bpf_tunnel_test() { + local errors=0 + echo "Testing GRE tunnel..." test_gre + errors=$(( $errors + $? )) + echo "Testing IP6GRE tunnel..." test_ip6gre + errors=$(( $errors + $? )) + echo "Testing IP6GRETAP tunnel..." test_ip6gretap + errors=$(( $errors + $? )) + echo "Testing ERSPAN tunnel..." test_erspan v2 + errors=$(( $errors + $? )) + echo "Testing IP6ERSPAN tunnel..." test_ip6erspan v2 + errors=$(( $errors + $? )) + echo "Testing VXLAN tunnel..." test_vxlan + errors=$(( $errors + $? )) + echo "Testing IP6VXLAN tunnel..." test_ip6vxlan + errors=$(( $errors + $? )) + echo "Testing GENEVE tunnel..." test_geneve + errors=$(( $errors + $? )) + echo "Testing IP6GENEVE tunnel..." test_ip6geneve + errors=$(( $errors + $? )) + echo "Testing IPIP tunnel..." test_ipip + errors=$(( $errors + $? )) + echo "Testing IPIP6 tunnel..." test_ipip6 + errors=$(( $errors + $? )) + echo "Testing IPSec tunnel..." test_xfrm_tunnel + errors=$(( $errors + $? 
)) + + return $errors +} + trap cleanup 0 3 6 @@ -728,4 +755,9 @@ trap cleanup_exit 2 9 cleanup bpf_tunnel_test +if [ $? -ne 0 ]; then + echo -e "$(basename $0): ${RED}FAIL${NC}" + exit 1 +fi +echo -e "$(basename $0): ${GREEN}PASS${NC}" exit 0 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 288cb740e005..c5514daf8865 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -135,32 +135,36 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) loop: for (j = 0; j < PUSH_CNT; j++) { insn[i++] = BPF_LD_ABS(BPF_B, 0); - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2); + /* jump to error label */ + insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); i++; insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1); insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2); insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2); + insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); i++; } for (j = 0; j < PUSH_CNT; j++) { insn[i++] = BPF_LD_ABS(BPF_B, 0); - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2); + insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3); i++; insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_pop), - insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2); + insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3); i++; } if (++k < 5) goto loop; - for (; i < len - 1; i++) - insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef); + for (; i < len - 3; i++) + insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef); + insn[len - 3] = BPF_JMP_A(1); + /* error label */ + insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0); insn[len - 1] = BPF_EXIT_INSN(); self->prog_len = len; } @@ -168,8 +172,13 @@ loop: static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; - /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */ - unsigned int len = (1 << 15) / 6; + /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns, + * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted + * to extend the error value of the inlined ld_abs sequence which then + * contains 7 insns. so, set the divisor to 7 so the testcase could + * work on all arches. 
+ */ + unsigned int len = (1 << 15) / 7; int i = 0; insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); @@ -207,33 +216,35 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) self->retval = (uint32_t)res; } -/* test the sequence of 1k jumps */ +#define MAX_JMP_SEQ 8192 + +/* test the sequence of 8k jumps */ static void bpf_fill_scale1(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; int i = 0, k = 0; insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); - /* test to check that the sequence of 1024 jumps is acceptable */ - while (k++ < 1024) { + /* test to check that the long sequence of jumps is acceptable */ + while (k++ < MAX_JMP_SEQ) { insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32); - insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); + insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % 64 + 1)); } - /* every jump adds 1024 steps to insn_processed, so to stay exactly - * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT + /* is_state_visited() doesn't allocate state for pruning for every jump. + * Hence multiply jmps by 4 to accommodate that heuristic */ - while (i < MAX_TEST_INSNS - 1025) - insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); + while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) + insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; self->retval = 42; } -/* test the sequence of 1k jumps in inner most function (function depth 8)*/ +/* test the sequence of 8k jumps in inner most function (function depth 8)*/ static void bpf_fill_scale2(struct bpf_test *self) { struct bpf_insn *insn = self->fill_insns; @@ -245,20 +256,18 @@ static void bpf_fill_scale2(struct bpf_test *self) insn[i++] = BPF_EXIT_INSN(); } insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); - /* test to check that the sequence of 1024 jumps is acceptable */ - while (k++ < 1024) { + /* test to check that the long sequence of jumps is acceptable */ + k = 0; + while (k++ < MAX_JMP_SEQ) { insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32); - insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2); + insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2); insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % (64 - 4 * FUNC_NEST) + 1)); } - /* every jump adds 1024 steps to insn_processed, so to stay exactly - * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT - */ - while (i < MAX_TEST_INSNS - 1025) - insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42); + while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4) + insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42); insn[i] = BPF_EXIT_INSN(); self->prog_len = i + 1; self->retval = 42; @@ -867,7 +876,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, if (fixup_skips != skips) return; - pflags = 0; + pflags = BPF_F_TEST_RND_HI32; if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) pflags |= BPF_F_STRICT_ALIGNMENT; if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) diff --git a/tools/testing/selftests/bpf/test_xdping.sh b/tools/testing/selftests/bpf/test_xdping.sh new file mode 100755 index 000000000000..c2f0ddb45531 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdping.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# xdping tests +# Here we setup and teardown configuration 
required to run +# xdping, exercising its options. +# +# Setup is similar to test_tunnel tests but without the tunnel. +# +# Topology: +# --------- +# root namespace | xdp_ns0 namespace +# | +# ---------- | ---------- +# | veth1 | --------- | veth0 | +# ---------- peer ---------- +# +# Device Configuration +# -------------------- +# Root namespace with BPF +# Device names and addresses: +# veth1 IP: 10.1.1.200 +# xdp added to veth1, xdpings originate from here. +# +# Namespace xdp_ns0 with BPF +# Device names and addresses: +# veth0 IPv4: 10.1.1.100 +# For some tests xdping runs in server mode here. +# + +readonly TARGET_IP="10.1.1.100" +readonly TARGET_NS="xdp_ns0" + +readonly LOCAL_IP="10.1.1.200" + +setup() +{ + ip netns add $TARGET_NS + ip link add veth0 type veth peer name veth1 + ip link set veth0 netns $TARGET_NS + ip netns exec $TARGET_NS ip addr add ${TARGET_IP}/24 dev veth0 + ip addr add ${LOCAL_IP}/24 dev veth1 + ip netns exec $TARGET_NS ip link set veth0 up + ip link set veth1 up +} + +cleanup() +{ + set +e + ip netns delete $TARGET_NS 2>/dev/null + ip link del veth1 2>/dev/null + if [[ $server_pid -ne 0 ]]; then + kill -TERM $server_pid + fi +} + +test() +{ + client_args="$1" + server_args="$2" + + echo "Test client args '$client_args'; server args '$server_args'" + + server_pid=0 + if [[ -n "$server_args" ]]; then + ip netns exec $TARGET_NS ./xdping $server_args & + server_pid=$! + sleep 10 + fi + ./xdping $client_args $TARGET_IP + + if [[ $server_pid -ne 0 ]]; then + kill -TERM $server_pid + server_pid=0 + fi + + echo "Test client args '$client_args'; server args '$server_args': PASS" +} + +set -e + +server_pid=0 + +trap cleanup EXIT + +setup + +for server_args in "" "-I veth0 -s -S" ; do + # client in skb mode + client_args="-I veth1 -S" + test "$client_args" "$server_args" + + # client with count of 10 RTT measurements. + client_args="-I veth1 -S -c 10" + test "$client_args" "$server_args" +done + +echo "OK. 
All tests passed" +exit 0 diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 9a9fc6c9b70b..b47f205f0310 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -30,9 +30,7 @@ int load_kallsyms(void) if (!f) return -ENOENT; - while (!feof(f)) { - if (!fgets(buf, sizeof(buf), f)) - break; + while (fgets(buf, sizeof(buf), f)) { if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3) break; if (!addr) diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 9093a8f64dc6..2d752c4f8d9d 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -215,9 +215,11 @@ BPF_MOV64_IMM(BPF_REG_0, 3), BPF_JMP_IMM(BPF_JA, 0, 0, -6), }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "back-edge from insn", - .result = REJECT, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .errstr_unpriv = "back-edge from insn", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 1, }, { "calls: conditional call 4", @@ -250,22 +252,24 @@ BPF_MOV64_IMM(BPF_REG_0, 3), BPF_EXIT_INSN(), }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "back-edge from insn", - .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, }, { "calls: conditional call 6", .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), BPF_EXIT_INSN(), BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, mark)), BPF_EXIT_INSN(), }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "back-edge from insn", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "infinite loop detected", .result = REJECT, }, { diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c index 349c0862fb4c..4eb76ed739ce 100644 --- a/tools/testing/selftests/bpf/verifier/cfg.c +++ b/tools/testing/selftests/bpf/verifier/cfg.c @@ -41,7 +41,8 @@ BPF_JMP_IMM(BPF_JA, 0, 0, -1), BPF_EXIT_INSN(), }, - .errstr = "back-edge", + .errstr = "unreachable insn 1", + .errstr_unpriv = "back-edge", .result = REJECT, }, { @@ -53,18 +54,20 @@ BPF_JMP_IMM(BPF_JA, 0, 0, -4), BPF_EXIT_INSN(), }, - .errstr = "back-edge", + .errstr = "unreachable insn 4", + .errstr_unpriv = "back-edge", .result = REJECT, }, { "conditional loop", .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), BPF_EXIT_INSN(), }, - .errstr = "back-edge", + .errstr = "infinite loop detected", + .errstr_unpriv = "back-edge", .result = REJECT, }, diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c index d5c596fdc4b9..2c5fbe7bcd27 100644 --- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c @@ -511,7 +511,8 @@ offsetof(struct __sk_buff, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), BPF_ALU64_IMM(BPF_AND, 
BPF_REG_0, 0xffff), diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c index 1f39d845c64f..67ab12410050 100644 --- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c +++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c @@ -29,9 +29,9 @@ { "helper access to variable memory: stack, bitwise AND, zero included", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), @@ -46,9 +46,9 @@ { "helper access to variable memory: stack, bitwise AND + JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), @@ -122,9 +122,9 @@ { "helper access to variable memory: stack, JMP, bounds + offset", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), @@ -143,9 +143,9 @@ { "helper access to variable memory: stack, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), @@ -163,9 +163,9 @@ { "helper access to variable memory: stack, JMP, no max check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_MOV64_IMM(BPF_REG_4, 0), @@ -183,9 +183,9 @@ { "helper access to variable memory: stack, JMP, no min check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), @@ -201,9 +201,9 @@ { "helper access to variable memory: stack, JMP (signed), no min check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), @@ -244,6 +244,7 @@ { "helper access to variable memory: map, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), @@ -251,7 +252,7 @@ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_REG(BPF_REG_2, 
BPF_REG_6), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4), @@ -262,7 +263,7 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_48b = { 3 }, + .fixup_map_hash_48b = { 4 }, .errstr = "invalid access to map value, value_size=48 off=0 size=49", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, @@ -296,6 +297,7 @@ { "helper access to variable memory: map adjusted, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), @@ -304,7 +306,7 @@ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4), @@ -315,7 +317,7 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_48b = { 3 }, + .fixup_map_hash_48b = { 4 }, .errstr = "R1 min value is outside of the array range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, @@ -337,8 +339,8 @@ { "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), @@ -562,6 +564,7 @@ { "helper access to variable memory: 8 bytes leak", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), BPF_MOV64_IMM(BPF_REG_0, 0), @@ -572,7 +575,6 @@ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 1), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c new file mode 100644 index 000000000000..5e980a5ab69d --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/loops1.c @@ -0,0 +1,161 @@ +{ + "bounded loop, count to 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .retval = 4, +}, +{ + "bounded loop, count to 20", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bounded loop, count from positive unknown to 4", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .retval = 4, +}, +{ + "bounded loop, count from totally unknown to 4", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + 
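
These loops1.c entries exercise the verifier's bounded-loop support that the calls.c and cfg.c updates above also reflect. In C terms, the first test, "bounded loop, count to 4", is simply the following, and the verifier must prove from the r0 < 4 bound that the back-edge terminates (a sketch of the equivalent source):

int bounded_count_to_4(void)
{
	int r0 = 0;

	do {
		r0 += 1;
	} while (r0 < 4);

	return r0;	/* matches .retval = 4 */
}
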
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bounded loop, count to 4 with equality", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bounded loop, start in the middle", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_A(1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "back-edge", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .retval = 4, +}, +{ + "bounded loop containing a forward jump", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .retval = 4, +}, +{ + "bounded loop that jumps out rather than in", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_JMP_A(-4), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "infinite loop after a conditional jump", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 5), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_A(-2), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "program is too large", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bounded recursion", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "back-edge", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "infinite loop in two jumps", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_A(0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "loop detected", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "infinite loop: three-jump trick", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "loop detected", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c index bbdba990fefb..da7a4b37cb98 100644 --- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c +++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c @@ -29,21 +29,6 @@ .prog_type = BPF_PROG_TYPE_SOCK_OPS, }, { - "prevent map lookup in xskmap", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, 
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
index bbdba990fefb..da7a4b37cb98 100644
--- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -29,21 +29,6 @@
 	.prog_type = BPF_PROG_TYPE_SOCK_OPS,
 },
 {
-	"prevent map lookup in xskmap",
-	.insns = {
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-	BPF_LD_MAP_FD(BPF_REG_1, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_EXIT_INSN(),
-	},
-	.fixup_map_xskmap = { 3 },
-	.result = REJECT,
-	.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
-	.prog_type = BPF_PROG_TYPE_XDP,
-},
-{
 	"prevent map lookup in stack trace",
 	.insns = {
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index b31cd2cf50d0..9ed192e14f5f 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -498,3 +498,21 @@
 	.result = REJECT,
 	.errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
 },
+{
+	"bpf_map_lookup_elem(xskmap, &key); xs->queue_id",
+	.insns = {
+	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_xskmap = { 3 },
+	.prog_type = BPF_PROG_TYPE_XDP,
+	.result = ACCEPT,
+},
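The deletion from prevent_map_lookup.c and the addition to sock.c are two halves of the same change: bpf_map_lookup_elem() on an XSKMAP is no longer refused for XDP programs; instead it returns a pointer from which, after the mandatory NULL check, the queue_id field of struct bpf_xdp_sock may be read. A hedged C equivalent of the accepted pattern follows; the map and program names are invented, and it assumes uapi headers that define struct bpf_xdp_sock, added alongside this change.

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") xsks = {
		.type = BPF_MAP_TYPE_XSKMAP,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 4,
	};

	SEC("xdp")
	int read_queue_id(struct xdp_md *ctx)
	{
		struct bpf_xdp_sock *xs;
		int key = 0;

		xs = bpf_map_lookup_elem(&xsks, &key);
		if (!xs)	/* NULL check is required by the verifier */
			return XDP_PASS;

		/* queue_id is the only readable field of struct bpf_xdp_sock. */
		return xs->queue_id ? XDP_PASS : XDP_DROP;
	}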
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
new file mode 100644
index 000000000000..d60a343b1371
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. */
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <arpa/inet.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/resource.h>
+#include <net/if.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+
+#include "bpf/bpf.h"
+#include "bpf/libbpf.h"
+
+#include "xdping.h"
+
+static int ifindex;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+
+static void cleanup(int sig)
+{
+	bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+	if (sig)
+		exit(1);
+}
+
+static int get_stats(int fd, __u16 count, __u32 raddr)
+{
+	struct pinginfo pinginfo = { 0 };
+	char inaddrbuf[INET_ADDRSTRLEN];
+	struct in_addr inaddr;
+	__u16 i;
+
+	inaddr.s_addr = raddr;
+
+	printf("\nXDP RTT data:\n");
+
+	if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) {
+		perror("bpf_map_lookup elem");
+		return 1;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (pinginfo.times[i] == 0)
+			break;
+
+		printf("64 bytes from %s: icmp_seq=%d ttl=64 time=%#.5f ms\n",
+		       inet_ntop(AF_INET, &inaddr, inaddrbuf,
+				 sizeof(inaddrbuf)),
+		       count + i + 1,
+		       (double)pinginfo.times[i]/1000000);
+	}
+
+	if (i < count) {
+		fprintf(stderr, "Expected %d samples, got %d.\n", count, i);
+		return 1;
+	}
+
+	bpf_map_delete_elem(fd, &raddr);
+
+	return 0;
+}
+
+static void show_usage(const char *prog)
+{
+	fprintf(stderr,
+		"usage: %s [OPTS] -I interface destination\n\n"
+		"OPTS:\n"
+		"    -c count		Stop after sending count requests\n"
+		"			(default %d, max %d)\n"
+		"    -I interface	interface name\n"
+		"    -N			Run in driver mode\n"
+		"    -s			Server mode\n"
+		"    -S			Run in skb mode\n",
+		prog, XDPING_DEFAULT_COUNT, XDPING_MAX_COUNT);
+}
+
+int main(int argc, char **argv)
+{
+	__u32 mode_flags = XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE;
+	struct addrinfo *a, hints = { .ai_family = AF_INET };
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	__u16 count = XDPING_DEFAULT_COUNT;
+	struct pinginfo pinginfo = { 0 };
+	const char *optstr = "c:I:NsS";
+	struct bpf_program *main_prog;
+	int prog_fd = -1, map_fd = -1;
+	struct sockaddr_in rin;
+	struct bpf_object *obj;
+	struct bpf_map *map;
+	char *ifname = NULL;
+	char filename[256];
+	int opt, ret = 1;
+	__u32 raddr = 0;
+	int server = 0;
+	char cmd[256];
+
+	while ((opt = getopt(argc, argv, optstr)) != -1) {
+		switch (opt) {
+		case 'c':
+			count = atoi(optarg);
+			if (count < 1 || count > XDPING_MAX_COUNT) {
+				fprintf(stderr,
+					"min count is 1, max count is %d\n",
+					XDPING_MAX_COUNT);
+				return 1;
+			}
+			break;
+		case 'I':
+			ifname = optarg;
+			ifindex = if_nametoindex(ifname);
+			if (!ifindex) {
+				fprintf(stderr, "Could not get interface %s\n",
+					ifname);
+				return 1;
+			}
+			break;
+		case 'N':
+			xdp_flags |= XDP_FLAGS_DRV_MODE;
+			break;
+		case 's':
+			/* use server program */
+			server = 1;
+			break;
+		case 'S':
+			xdp_flags |= XDP_FLAGS_SKB_MODE;
+			break;
+		default:
+			show_usage(basename(argv[0]));
+			return 1;
+		}
+	}
+
+	if (!ifname) {
+		show_usage(basename(argv[0]));
+		return 1;
+	}
+	if (!server && optind == argc) {
+		show_usage(basename(argv[0]));
+		return 1;
+	}
+
+	if ((xdp_flags & mode_flags) == mode_flags) {
+		fprintf(stderr, "-N or -S can be specified, not both.\n");
+		show_usage(basename(argv[0]));
+		return 1;
+	}
+
+	if (!server) {
+		/* Only supports IPv4; see hints initialization above. */
+		if (getaddrinfo(argv[optind], NULL, &hints, &a) || !a) {
+			fprintf(stderr, "Could not resolve %s\n", argv[optind]);
+			return 1;
+		}
+		memcpy(&rin, a->ai_addr, sizeof(rin));
+		raddr = rin.sin_addr.s_addr;
+		freeaddrinfo(a);
+	}
+
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (bpf_prog_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) {
+		fprintf(stderr, "load of %s failed\n", filename);
+		return 1;
+	}
+
+	main_prog = bpf_object__find_program_by_title(obj,
+						      server ? "xdpserver" :
+							       "xdpclient");
+	if (main_prog)
+		prog_fd = bpf_program__fd(main_prog);
+	if (!main_prog || prog_fd < 0) {
+		fprintf(stderr, "could not find xdping program");
+		return 1;
+	}
+
+	map = bpf_map__next(NULL, obj);
+	if (map)
+		map_fd = bpf_map__fd(map);
+	if (!map || map_fd < 0) {
+		fprintf(stderr, "Could not find ping map");
+		goto done;
+	}
+
+	signal(SIGINT, cleanup);
+	signal(SIGTERM, cleanup);
+
+	printf("Setting up XDP for %s, please wait...\n", ifname);
+
+	printf("XDP setup disrupts network connectivity, hit Ctrl+C to quit\n");
+
+	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
+		fprintf(stderr, "Link set xdp fd failed for %s\n", ifname);
+		goto done;
+	}
+
+	if (server) {
+		close(prog_fd);
+		close(map_fd);
+		printf("Running server on %s; press Ctrl+C to exit...\n",
+		       ifname);
+		do { } while (1);
+	}
+
+	/* Start xdping-ing from last regular ping reply, e.g. for a count
+	 * of 10 ICMP requests, we start xdping-ing using reply with seq number
+	 * 10.  The reason the last "real" ping RTT is much higher is that
+	 * the ping program sees the ICMP reply associated with the last
+	 * XDP-generated packet, so ping doesn't get a reply until XDP is done.
+	 */
+	pinginfo.seq = htons(count);
+	pinginfo.count = count;
+
+	if (bpf_map_update_elem(map_fd, &raddr, &pinginfo, BPF_ANY)) {
+		fprintf(stderr, "could not communicate with BPF map: %s\n",
+			strerror(errno));
+		cleanup(0);
+		goto done;
+	}
+
+	/* We need to wait for XDP setup to complete. */
+	sleep(10);
+
+	snprintf(cmd, sizeof(cmd), "ping -c %d -I %s %s",
+		 count, ifname, argv[optind]);
+
+	printf("\nNormal ping RTT data\n");
+	printf("[Ignore final RTT; it is distorted by XDP using the reply]\n");
+
+	ret = system(cmd);
+
+	if (!ret)
+		ret = get_stats(map_fd, count, raddr);
+
+	cleanup(0);
+
+done:
+	if (prog_fd > 0)
+		close(prog_fd);
+	if (map_fd > 0)
+		close(map_fd);
+
+	return ret;
+}
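Taken together, main() sets up a client/server pair that can be run by hand as well as from the selftest harness; the invocation below is illustrative only, with the interface and address invented:

	./xdping -I veth0 -s               # responder: XDP answers echo requests
	./xdping -I veth0 -c 4 10.1.1.2    # prober: 4 ping RTTs, then 4 XDP RTTs

The client primes the ping map with seq = htons(count) before shelling out to ping; as the comment in main() describes, once the regular replies arrive, the XDP client program re-sends each reply as a new request and timestamps it, so get_stats() can print round-trip times measured entirely in XDP.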
diff --git a/tools/testing/selftests/bpf/xdping.h b/tools/testing/selftests/bpf/xdping.h
new file mode 100644
index 000000000000..afc578df77be
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdping.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. */
+
+#define XDPING_MAX_COUNT 10
+#define XDPING_DEFAULT_COUNT 4
+
+struct pinginfo {
+	__u64	start;
+	__be16	seq;
+	__u16	count;
+	__u32	pad;
+	__u64	times[XDPING_MAX_COUNT];
+};
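struct pinginfo is shared with the BPF object xdping.c loads (xdping_kern.o): times[] holds nanosecond timestamps, which is why get_stats() divides by 1000000 to print milliseconds, and entries are keyed by the __u32 IPv4 address that xdping.c passes to bpf_map_lookup_elem(). Since the loader simply takes the object's first map via bpf_map__next(), the kernel side is expected to declare something like the sketch below; everything except the key and value sizes implied by xdping.c is an assumption.

	#include <linux/bpf.h>
	#include "bpf_helpers.h"
	#include "xdping.h"

	/* Hypothetical ping map; xdping.c finds it via bpf_map__next(NULL, obj). */
	struct bpf_map_def SEC("maps") ping_map = {
		.type = BPF_MAP_TYPE_HASH,
		.key_size = sizeof(__u32),		/* IPv4 address (raddr) */
		.value_size = sizeof(struct pinginfo),
		.max_entries = 256,			/* assumed */
	};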