author | Thomas Zimmermann <tzimmermann@suse.de> | 2023-10-11 09:50:59 +0200
committer | Thomas Zimmermann <tzimmermann@suse.de> | 2023-10-11 09:50:59 +0200
commit | 57390019b68b83f96eb98f490367b9df1f2d77cb (patch)
tree | e6d4b6c75efdd2d7fb7d37f980688c491be3ff6a /net/core/sock_map.c
parent | e5f9d543419c78ac58f3b3557bc5a76b20ff600b (diff)
parent | 389af786f92ecdff35883551d54bf4e507ffcccb (diff)
Merge drm/drm-next into drm-misc-next
Updating drm-misc-next to the state of Linux v6.6-rc2.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'net/core/sock_map.c')
-rw-r--r-- | net/core/sock_map.c | 48
1 file changed, 23 insertions, 25 deletions
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 19538d628714..cb11750b1df5 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -18,7 +18,7 @@ struct bpf_stab {
 	struct bpf_map map;
 	struct sock **sks;
 	struct sk_psock_progs progs;
-	raw_spinlock_t lock;
+	spinlock_t lock;
 };
 
 #define SOCK_CREATE_FLAG_MASK \
@@ -44,7 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	bpf_map_init_from_attr(&stab->map, attr);
-	raw_spin_lock_init(&stab->lock);
+	spin_lock_init(&stab->lock);
 
 	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
 				       sizeof(struct sock *),
@@ -115,7 +115,6 @@ static void sock_map_sk_acquire(struct sock *sk)
 	__acquires(&sk->sk_lock.slock)
 {
 	lock_sock(sk);
-	preempt_disable();
 	rcu_read_lock();
 }
 
@@ -123,7 +122,6 @@ static void sock_map_sk_release(struct sock *sk)
 	__releases(&sk->sk_lock.slock)
 {
 	rcu_read_unlock();
-	preempt_enable();
 	release_sock(sk);
 }
 
@@ -148,13 +146,13 @@ static void sock_map_del_link(struct sock *sk,
 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 		if (link->link_raw == link_raw) {
 			struct bpf_map *map = link->map;
-			struct bpf_stab *stab = container_of(map, struct bpf_stab,
-							     map);
-			if (psock->saved_data_ready && stab->progs.stream_parser)
+			struct sk_psock_progs *progs = sock_map_progs(map);
+
+			if (psock->saved_data_ready && progs->stream_parser)
 				strp_stop = true;
-			if (psock->saved_data_ready && stab->progs.stream_verdict)
+			if (psock->saved_data_ready && progs->stream_verdict)
 				verdict_stop = true;
-			if (psock->saved_data_ready && stab->progs.skb_verdict)
+			if (psock->saved_data_ready && progs->skb_verdict)
 				verdict_stop = true;
 			list_del(&link->list);
 			sk_psock_free_link(link);
@@ -413,7 +411,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 	struct sock *sk;
 	int err = 0;
 
-	raw_spin_lock_bh(&stab->lock);
+	spin_lock_bh(&stab->lock);
 	sk = *psk;
 	if (!sk_test || sk_test == sk)
 		sk = xchg(psk, NULL);
@@ -423,7 +421,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 	else
 		err = -EINVAL;
 
-	raw_spin_unlock_bh(&stab->lock);
+	spin_unlock_bh(&stab->lock);
 	return err;
 }
 
@@ -489,7 +487,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
 	psock = sk_psock(sk);
 	WARN_ON_ONCE(!psock);
 
-	raw_spin_lock_bh(&stab->lock);
+	spin_lock_bh(&stab->lock);
 	osk = stab->sks[idx];
 	if (osk && flags == BPF_NOEXIST) {
 		ret = -EEXIST;
@@ -503,10 +501,10 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
 	stab->sks[idx] = sk;
 	if (osk)
 		sock_map_unref(osk, &stab->sks[idx]);
-	raw_spin_unlock_bh(&stab->lock);
+	spin_unlock_bh(&stab->lock);
 	return 0;
 out_unlock:
-	raw_spin_unlock_bh(&stab->lock);
+	spin_unlock_bh(&stab->lock);
 	if (psock)
 		sk_psock_put(sk, psock);
 out_free:
@@ -837,7 +835,7 @@ struct bpf_shtab_elem {
 
 struct bpf_shtab_bucket {
 	struct hlist_head head;
-	raw_spinlock_t lock;
+	spinlock_t lock;
 };
 
 struct bpf_shtab {
@@ -912,7 +910,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 	 * is okay since it's going away only after RCU grace period.
 	 * However, we need to check whether it's still present.
 	 */
-	raw_spin_lock_bh(&bucket->lock);
+	spin_lock_bh(&bucket->lock);
 	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
 					       elem->key, map->key_size);
 	if (elem_probe && elem_probe == elem) {
@@ -920,7 +918,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 		sock_map_unref(elem->sk, elem);
 		sock_hash_free_elem(htab, elem);
 	}
-	raw_spin_unlock_bh(&bucket->lock);
+	spin_unlock_bh(&bucket->lock);
 }
 
 static long sock_hash_delete_elem(struct bpf_map *map, void *key)
@@ -934,7 +932,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 	hash = sock_hash_bucket_hash(key, key_size);
 	bucket = sock_hash_select_bucket(htab, hash);
 
-	raw_spin_lock_bh(&bucket->lock);
+	spin_lock_bh(&bucket->lock);
 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 	if (elem) {
 		hlist_del_rcu(&elem->node);
@@ -942,7 +940,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 		sock_hash_free_elem(htab, elem);
 		ret = 0;
 	}
-	raw_spin_unlock_bh(&bucket->lock);
+	spin_unlock_bh(&bucket->lock);
 	return ret;
 }
 
@@ -1002,7 +1000,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
 	hash = sock_hash_bucket_hash(key, key_size);
 	bucket = sock_hash_select_bucket(htab, hash);
 
-	raw_spin_lock_bh(&bucket->lock);
+	spin_lock_bh(&bucket->lock);
 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 	if (elem && flags == BPF_NOEXIST) {
 		ret = -EEXIST;
@@ -1028,10 +1026,10 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
 		sock_map_unref(elem->sk, elem);
 		sock_hash_free_elem(htab, elem);
 	}
-	raw_spin_unlock_bh(&bucket->lock);
+	spin_unlock_bh(&bucket->lock);
 	return 0;
 out_unlock:
-	raw_spin_unlock_bh(&bucket->lock);
+	spin_unlock_bh(&bucket->lock);
 	sk_psock_put(sk, psock);
 out_free:
 	sk_psock_free_link(link);
@@ -1117,7 +1115,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 
 	for (i = 0; i < htab->buckets_num; i++) {
 		INIT_HLIST_HEAD(&htab->buckets[i].head);
-		raw_spin_lock_init(&htab->buckets[i].lock);
+		spin_lock_init(&htab->buckets[i].lock);
 	}
 
 	return &htab->map;
@@ -1149,11 +1147,11 @@ static void sock_hash_free(struct bpf_map *map)
 		 * exists, psock exists and holds a ref to socket. That
		 * lets us to grab a socket ref too.
 		 */
-		raw_spin_lock_bh(&bucket->lock);
+		spin_lock_bh(&bucket->lock);
 		hlist_for_each_entry(elem, &bucket->head, node)
 			sock_hold(elem->sk);
 		hlist_move_list(&bucket->head, &unlink_list);
-		raw_spin_unlock_bh(&bucket->lock);
+		spin_unlock_bh(&bucket->lock);
 
 		/* Process removed entries out of atomic context to
 		 * block for socket lock before deleting the psock's