author    Eric Dumazet <edumazet@google.com>    2019-10-23 22:44:51 -0700
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-11-10 11:27:49 +0100
commit    4f3df7f1eaa70903c2c0c73c0999e120fe452a2a
tree      3c20c466e259af2fc6972a7d4a3e3c8f43b2e39f
parent    eaf548feaa17308317bdac2903c1be820e5c186a
net: use skb_queue_empty_lockless() in busy poll contexts
[ Upstream commit 3f926af3f4d688e2e11e7f8ed04e277a14d4d4a4 ]

Busy polling usually runs without locks.
Let's use skb_queue_empty_lockless() instead of skb_queue_empty().

Also uses READ_ONCE() in __skb_try_recv_datagram() to address
a similar potential problem.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
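A note on the helper itself (not part of this diff): the sketch below shows roughly how skb_queue_empty() and skb_queue_empty_lockless() are defined in include/linux/skbuff.h around this kernel release; it is a simplified illustration, not a verbatim copy of the header.

	/* Plain check: fine when the caller holds the queue lock (or otherwise
	 * serializes against writers); the load of list->next is not annotated,
	 * so a concurrent reader constitutes a data race.
	 */
	static inline int skb_queue_empty(const struct sk_buff_head *list)
	{
		return list->next == (const struct sk_buff *)list;
	}

	/* Lockless variant: READ_ONCE() forces a single, untorn load of
	 * list->next, which is what busy-poll paths need when they peek at
	 * sk_receive_queue without taking its lock.
	 */
	static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
	{
		return READ_ONCE(list->next) == (const struct sk_buff *)list;
	}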
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/datagram.c	2
-rw-r--r--	net/core/sock.c	2
2 files changed, 2 insertions, 2 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b69e0e28b826..d3b99616a6c5 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
 			break;
 
 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
-	} while (sk->sk_receive_queue.prev != *last);
+	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
 	error = -EAGAIN;
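Why READ_ONCE() here: the loop condition re-reads sk_receive_queue.prev on every busy-poll iteration with no lock held, so the access must be a single well-defined load. The sketch below illustrates the idea with a simplified stand-in macro (READ_ONCE_SKETCH and queue_sketch are illustrative names, not kernel APIs).

	/* Simplified stand-in for the kernel's READ_ONCE(): the volatile cast
	 * makes the compiler emit exactly one load and forbids caching,
	 * refetching or tearing the access.
	 */
	#define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

	struct queue_sketch {	/* stand-in for struct sk_buff_head */
		void *next;
		void *prev;
	};

	/* Loop condition as in __skb_try_recv_datagram(): without the
	 * annotation the compiler could, in principle, tear or reuse a stale
	 * read of q->prev, since another CPU updates it concurrently.
	 */
	static int tail_unchanged(struct queue_sketch *q, void *last)
	{
		return READ_ONCE_SKETCH(q->prev) == last;
	}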
diff --git a/net/core/sock.c b/net/core/sock.c
index 5f652dbe66a2..6c1107821776 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3483,7 +3483,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
 	struct sock *sk = p;
 
-	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	       sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
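For context on why the lockless check is needed: sk_busy_loop_end() is the loop-termination callback polled repeatedly during busy polling, with no sk_receive_queue.lock held. Below is a simplified rendering of the caller, roughly following sk_busy_loop() in include/net/busy_poll.h for this kernel era; treat it as a sketch, not the exact source.

	/* Busy-poll entry point (simplified): napi_busy_loop() spins on the
	 * socket's NAPI instance and keeps invoking the loop_end callback,
	 * here sk_busy_loop_end(), until it returns true. The callback thus
	 * runs with no queue lock held, so its empty check must be lockless.
	 */
	static inline void sk_busy_loop(struct sock *sk, int nonblock)
	{
		unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

		if (napi_id >= MIN_NAPI_ID)
			napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
	}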