author	David S. Miller <davem@davemloft.net>	2011-03-08 14:59:28 -0800
committer	David S. Miller <davem@davemloft.net>	2011-03-08 14:59:28 -0800
commit	7b46ac4e77f3224a1befe032c77f1df31d1b42c4 (patch)
tree	e0bd89b476e0f07e23f949366c283f7d7d6f9d4e /net
parent	5217e8794619ac0a29151f29be20c7d6188303ba (diff)
inetpeer: Don't disable BH for initial fast RCU lookup.
If modifications on other cpus are ok, then modifications to the tree during lookup done by the local cpu are ok too.

Signed-off-by: David S. Miller <davem@davemloft.net>
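In practice the change boils down to the fast path below. This is a condensed sketch of the patched fast path in inet_getpeer(), not the verbatim kernel source: peer_lookup_fastpath() is a hypothetical wrapper name used only for illustration, while rcu_read_lock(), lookup_rcu(), base->lock, and the seqlock calls all come straight from the diff.

	/* Condensed sketch of the patched fast path in inet_getpeer().
	 * peer_lookup_fastpath() is a made-up name for illustration.
	 */
	static struct inet_peer *peer_lookup_fastpath(const struct inetpeer_addr *daddr,
						      struct inet_peer_base *base,
						      int *invalidated)
	{
		unsigned int sequence;
		struct inet_peer *p;

		rcu_read_lock();			/* was rcu_read_lock_bh() */
		sequence = read_seqbegin(&base->lock);	/* snapshot writer seqcount */
		p = lookup_rcu(daddr, base);		/* lockless tree walk */
		*invalidated = read_seqretry(&base->lock, sequence);
		rcu_read_unlock();			/* was rcu_read_unlock_bh() */

		/* If *invalidated is set, a writer touched the tree during the
		 * walk and the caller must fall back to the locked slow path.
		 */
		return p;
	}

The RCU read-side section only guarantees that every pointer the walk follows stays valid; the seqlock check is what tells the caller whether a concurrent writer modified the tree, so the plain (non-BH) read lock suffices here.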
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/inetpeer.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
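For context, the renamed lookup reads roughly as follows after the patch. This is a simplified reconstruction assembled from the hunks below, not a verbatim copy of net/ipv4/inetpeer.c: the match-handling hidden between the two hunks (the refcount check before "return u;") is elided, and the addr_compare() call is reconstructed from the hunk headers.

	/* Simplified reconstruction of lookup_rcu() after this patch.
	 * The walk holds no lock against writers, so it may chase stale
	 * links; RCU keeps each pointer it follows valid, and the depth
	 * counter bounds the walk at PEER_MAXDEPTH if the tree mutates
	 * underneath it.
	 */
	static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
					    struct inet_peer_base *base)
	{
		struct inet_peer *u = rcu_dereference(base->root);
		int count = 0;

		while (u != peer_avl_empty) {
			int cmp = addr_compare(daddr, &u->daddr);

			if (cmp == 0)
				return u;	/* real code also takes a reference here */
			if (cmp == -1)
				u = rcu_dereference(u->avl_left);
			else
				u = rcu_dereference(u->avl_right);
			if (unlikely(++count == PEER_MAXDEPTH))
				break;
		}
		return NULL;
	}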
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f604ffdbea27..6442c35edb0b 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
 })
 
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-				       struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+				    struct inet_peer_base *base)
 {
-	struct inet_peer *u = rcu_dereference_bh(base->root);
+	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
@@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
 			return u;
 		}
 		if (cmp == -1)
-			u = rcu_dereference_bh(u->avl_left);
+			u = rcu_dereference(u->avl_left);
 		else
-			u = rcu_dereference_bh(u->avl_right);
+			u = rcu_dereference(u->avl_right);
 		if (unlikely(++count == PEER_MAXDEPTH))
 			break;
 	}
@@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu_bh(daddr, base);
+	p = lookup_rcu(daddr, base);
 	invalidated = read_seqretry(&base->lock, sequence);
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	if (p) {
 		/* The existing node has been found.