author     Eric Dumazet <edumazet@google.com>          2016-05-17 17:44:08 -0700
committer  David S. Miller <davem@davemloft.net>       2016-05-19 11:36:49 -0700
commit     38036629cded6b96a9f9689758a88d067c4d4d44 (patch)
tree       419a1d0d02014f0fc416939a93792d1c4aee24f6 /net/rds/tcp_recv.c
parent     e1daca289a36965d923ec26647b5668e023eb0ac (diff)
rds: tcp: block BH in TCP callbacks
TCP stack can now run from process context.

Use read_lock_bh(&sk->sk_callback_lock) variant to restore previous
assumption.

Fixes: 5413d1babe8f ("net: do not block BH while processing socket backlog")
Fixes: d41a69f1d390 ("tcp: make tcp_sendmsg() aware of socket backlog")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rds/tcp_recv.c')
-rw-r--r--	net/rds/tcp_recv.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index d75d8b56a9e3..c3196f9d070a 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -301,7 +301,7 @@ void rds_tcp_data_ready(struct sock *sk)
 
 	rdsdebug("data ready sk %p\n", sk);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -315,7 +315,7 @@ void rds_tcp_data_ready(struct sock *sk)
 
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
 }
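
For context, below is a minimal sketch of the locking pattern this patch
applies. It is not the actual RDS code: "my_data_ready" and "priv" are
illustrative names, and the body is simplified down to the lock/unlock
pattern itself.

#include <net/sock.h>

/* Illustrative only: my_data_ready() stands in for a protocol's
 * data-ready callback such as rds_tcp_data_ready() above.
 * sk_callback_lock protects sk->sk_user_data and the sk_* callback
 * pointers against concurrent socket teardown.
 */
static void my_data_ready(struct sock *sk)
{
	void *priv;

	/* This callback used to run only from softirq, so BH was
	 * implicitly disabled here. Since 5413d1babe8f and
	 * d41a69f1d390 the TCP stack may also invoke it from process
	 * context, so the _bh lock variants are needed to keep the
	 * critical section running with bottom halves disabled, as
	 * the surrounding code still assumes.
	 */
	read_lock_bh(&sk->sk_callback_lock);
	priv = sk->sk_user_data;	/* NULL if teardown already ran */
	if (priv)
		pr_debug("data ready sk %p priv %p\n", sk, priv);
	read_unlock_bh(&sk->sk_callback_lock);
}

Note that the diffstat above is filtered to net/rds/tcp_recv.c; the same
one-line substitution (read_lock -> read_lock_bh, read_unlock ->
read_unlock_bh) is applied by this commit to the other RDS TCP socket
callbacks that take sk_callback_lock.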