Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r-- include/linux/bpf.h | 122
1 file changed, 75 insertions(+), 47 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f20f62f9d63..3b94ec161e8c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -184,8 +184,8 @@ struct bpf_map_ops {
};
enum {
- /* Support at most 10 fields in a BTF type */
- BTF_FIELDS_MAX = 10,
+ /* Support at most 11 fields in a BTF type */
+ BTF_FIELDS_MAX = 11,
};
enum btf_field_type {
@@ -202,6 +202,7 @@ enum btf_field_type {
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
+ BPF_WORKQUEUE = (1 << 10),
};
typedef void (*btf_dtor_kfunc_t)(void *);
@@ -238,6 +239,7 @@ struct btf_record {
u32 field_mask;
int spin_lock_off;
int timer_off;
+ int wq_off;
int refcount_off;
struct btf_field fields[];
};
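(The new BPF_WORKQUEUE field type and wq_off slot track a struct bpf_wq embedded in a map value, mirroring how timer_off tracks struct bpf_timer. A minimal BPF-program-side sketch of the intended usage follows; the bpf_wq_init()/bpf_wq_set_callback_impl()/bpf_wq_start() kfuncs ship alongside this header change, but their signatures here are reproduced from memory and should be treated as assumptions.)

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* kfunc declarations as assumed in this sketch */
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
				    int (callback_fn)(void *map, int *key, void *value),
				    unsigned int flags, void *aux__ign) __ksym;

struct elem {
	int data;
	struct bpf_wq wq;	/* recognized as a BPF_WORKQUEUE field */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

/* runs later from workqueue (sleepable) context */
static int wq_cb(void *map, int *key, void *value)
{
	return 0;
}

SEC("tc")
int schedule_wq(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&array, &key);

	if (!val)
		return 0;
	if (bpf_wq_init(&val->wq, &array, 0))
		return 0;
	/* the last argument is filled in by the verifier */
	if (bpf_wq_set_callback_impl(&val->wq, wq_cb, 0, NULL))
		return 0;
	bpf_wq_start(&val->wq, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";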
@@ -273,7 +275,7 @@ struct bpf_map {
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
@@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_spin_lock";
case BPF_TIMER:
return "bpf_timer";
+ case BPF_WORKQUEUE:
+ return "bpf_wq";
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
@@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_spin_lock);
case BPF_TIMER:
return sizeof(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return sizeof(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_spin_lock);
case BPF_TIMER:
return __alignof__(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return __alignof__(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
case BPF_SPIN_LOCK:
case BPF_TIMER:
+ case BPF_WORKQUEUE:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
@@ -525,6 +534,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
+void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
@@ -1116,8 +1126,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
-void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks, void *func_addr);
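(With arch_protect_bpf_trampoline() now returning int __must_check, callers can no longer assume the trampoline image transitions to read-only+executable; the failure has to be propagated. Illustrative caller pattern, not verbatim kernel code:)

	err = arch_protect_bpf_trampoline(image, size);
	if (err)
		goto out_free;	/* image could not be write-protected */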
@@ -1266,6 +1275,7 @@ int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
@@ -1574,12 +1584,26 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+ /* rcu is used before freeing, work can be used to schedule that
+ * RCU-based freeing before that, so they never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if underlying BPF program is sleepable we go through tasks trace
+ * RCU GP and then "classic" RCU GP
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
@@ -1588,6 +1612,7 @@ struct bpf_link_ops {
struct bpf_link_info *info);
int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
};
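(The rcu/work union in struct bpf_link and the dealloc vs. dealloc_deferred split work together: ops->release() runs first, then the link memory is freed either immediately or after the required grace periods, with work only ever used before the RCU stage. A condensed sketch of the free path, based on the kernel/bpf/syscall.c counterpart of this change but reproduced from memory, so treat details as approximate:)

static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
{
	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);

	/* all grace periods have elapsed; free the link's memory */
	link->ops->dealloc_deferred(link);
}

static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
{
	/* chain a "classic" RCU GP after the tasks-trace GP, unless the
	 * tasks-trace flavor already implies it
	 */
	if (rcu_trace_implies_rcu_gp())
		bpf_link_defer_dealloc_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
}

static void bpf_link_free(struct bpf_link *link)
{
	/* whether the underlying program is sleepable; how the flag is
	 * read off link->prog is elided in this sketch
	 */
	bool sleepable = false;

	if (link->ops->release)
		link->ops->release(link);
	if (link->ops->dealloc_deferred) {
		if (sleepable)
			call_rcu_tasks_trace(&link->rcu,
					     bpf_link_defer_dealloc_mult_rcu_gp);
		else
			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
	} else if (link->ops->dealloc) {
		link->ops->dealloc(link);
	}
}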
struct bpf_tramp_link {
@@ -1608,6 +1633,12 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+ u64 cookie;
+};
+
struct bpf_link_primer {
struct bpf_link *link;
struct file *file;
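(struct bpf_raw_tp_link gaining a cookie means user space can pass a u64 at raw-tracepoint attach time and the program can read it back with the existing bpf_get_attach_cookie() helper. Minimal BPF-side sketch; the tracepoint choice is illustrative:)

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("tp_btf/sched_switch")
int BPF_PROG(on_sched_switch)
{
	__u64 cookie = bpf_get_attach_cookie(ctx);

	/* distinguish multiple attachments sharing this one program */
	bpf_printk("cookie=%llu", cookie);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";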
@@ -1700,9 +1731,9 @@ struct bpf_struct_ops {
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
- int (*reg)(void *kdata);
- void (*unreg)(void *kdata);
- int (*update)(void *kdata, void *old_kdata);
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
int (*validate)(void *kdata);
void *cfi_stubs;
struct module *owner;
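(struct_ops implementers now see which bpf_link, if any, is driving the registration; a NULL link would correspond to attachment via map update rather than via a link. Hypothetical subsystem-side sketch; my_ops and my_ops_attach are made-up names:)

static int my_ops_reg(void *kdata, struct bpf_link *link)
{
	struct my_ops *ops = kdata;

	/* stash the link if the subsystem wants to support detach/update */
	return my_ops_attach(ops, link);
}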
@@ -2190,6 +2221,7 @@ void bpf_map_free_record(struct bpf_map *map);
struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
@@ -2221,7 +2253,7 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
unsigned long nr_pages, struct page **page_array);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
@@ -2230,31 +2262,18 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);
#else
-static inline void *
-bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
- int node)
-{
- return kmalloc_node(size, flags, node);
-}
-
-static inline void *
-bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
-{
- return kzalloc(size, flags);
-}
-
-static inline void *
-bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
-{
- return kvcalloc(n, size, flags);
-}
-
-static inline void __percpu *
-bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
- gfp_t flags)
-{
- return __alloc_percpu_gfp(size, align, flags);
-}
+/*
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
+ */
+#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
+ kmalloc_node(_size, _flags, _node)
+#define bpf_map_kzalloc(_map, _size, _flags) \
+ kzalloc(_size, _flags)
+#define bpf_map_kvcalloc(_map, _n, _size, _flags) \
+ kvcalloc(_n, _size, _flags)
+#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
+ __alloc_percpu_gfp(_size, _align, _flags)
#endif
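(Per the comment above, the inline-to-macro conversion is motivated by allocation profiling: expanding kmalloc_node() and friends directly at each call site gives every caller its own alloc_tag, where a shared inline wrapper would fold all BPF map allocations into a single tag. Call sites stay unchanged; a hypothetical example:)

	/* with !CONFIG_MEMCG this now expands to kmalloc_node() in place,
	 * so the allocation is accounted to this call site
	 */
	new = bpf_map_kmalloc_node(map, sizeof(*new), GFP_USER | __GFP_NOWARN, node);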
static inline int
@@ -2319,6 +2338,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
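(bpf_link_inc_not_zero() presumably mirrors the existing bpf_map_inc_not_zero(): take a reference only if the count is still live, which is what lookups by id need. Expected shape, sketched from the map counterpart rather than the actual patch:)

struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_inc_not_zero(&link->refcnt) ? link : ERR_PTR(-ENOENT);
}

(Note that the !CONFIG_BPF_SYSCALL stub further down returns NULL rather than an ERR_PTR, so callers have to cope with both conventions.)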
@@ -2478,7 +2498,7 @@ struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;
-void __dev_flush(void);
+void __dev_flush(struct list_head *flush_list);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
@@ -2491,7 +2511,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog, struct bpf_map *map,
bool exclude_ingress);
-void __cpu_map_flush(void);
+void __cpu_map_flush(struct list_head *flush_list);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
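(Both flush routines losing their implicit per-CPU state means the caller now owns and passes in the flush list. Sketch of the expected call site, assuming the bpf_net_context accessors this change is paired with; the accessor names are reproduced from memory:)

void xdp_do_flush(void)
{
	__dev_flush(bpf_net_ctx_get_dev_flush_list());
	__cpu_map_flush(bpf_net_ctx_get_cpu_map_flush_list());
}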
@@ -2628,8 +2648,6 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
-bool dev_check_flush(void);
-bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2690,6 +2708,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
{
}
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
static inline void bpf_link_put(struct bpf_link *link)
{
}
@@ -2717,7 +2740,7 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void __dev_flush(void)
+static inline void __dev_flush(struct list_head *flush_list)
{
}
@@ -2763,7 +2786,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static inline void __cpu_map_flush(void)
+static inline void __cpu_map_flush(struct list_head *flush_list)
{
}
@@ -2912,8 +2935,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
return ret;
}
-void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
- struct btf_mod_pair *used_btfs, u32 len);
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
@@ -2991,6 +3013,7 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);
void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
@@ -3089,6 +3112,11 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
{
return -EINVAL;
}
+
+static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
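(sock_map_link_create() brings sockmap under the bpf_link umbrella, so attachment can go through BPF_LINK_CREATE instead of BPF_PROG_ATTACH. Illustrative user-space sketch at the raw syscall level; the attach type shown is one of several valid choices:)

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sockmap_link_create(int prog_fd, int sockmap_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = sockmap_fd;
	attr.link_create.attach_type = BPF_SK_SKB_VERDICT;

	/* returns a link fd; closing it detaches the program */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}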
@@ -3238,8 +3266,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -3261,8 +3289,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
-static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr)
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr)
{
return -EOPNOTSUPP;
}
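(The last hunk switches the kfunc declaration to the user-visible context types, struct __sk_buff and struct bpf_dynptr; the implementation keeps operating on the kernel-side structs by casting. Sketch of the net/core/filter.c side, reproduced from memory and so an approximation:)

int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
			       struct bpf_dynptr *ptr)
{
	int err;

	err = bpf_dynptr_from_skb(skb, flags, ptr);
	if (err)
		return err;

	/* this dynptr must not be able to write into packet data */
	bpf_dynptr_set_rdonly((struct bpf_dynptr_kern *)ptr);

	return 0;
}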