author     Eric Dumazet <edumazet@google.com>    2023-04-21 09:43:57 +0000
committer  David S. Miller <davem@davemloft.net> 2023-04-23 13:35:07 +0100
commit     87eff2ec57b6d68d294013d8dd21e839a1175e3a (patch)
tree       ced867d044fe2843db10784167cbba91399f5131 /net/core/dev.c
parent     a1aaee7f8f79d1b0595e24f8c3caed24630d6cb6 (diff)
net: optimize napi_threaded_poll() vs RPS/RFS
We use napi_threaded_poll() in order to reduce our softirq dependency.

We can add a follow-up to 821eba962d95 ("net: optimize napi_schedule_rps()")
to further remove the need to fire NET_RX_SOFTIRQ whenever RPS/RFS
are used.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
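For illustration, here is a minimal user-space sketch of the mechanism (hypothetical names such as fake_softnet, schedule_rps() and threaded_poll_once(); not the kernel's softnet_data or softirq machinery): a flag set around the poll lets the RPS scheduling path skip raising NET_RX_SOFTIRQ, because the poller will flush the pending RPS IPI list itself.

/* Hedged sketch only: models the flag logic in plain C, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_softnet {
	bool in_net_rx_action;      /* softirq handler is running     */
	bool in_napi_threaded_poll; /* kthread poller is running      */
	bool rps_ipi_pending;       /* remote-CPU RPS work was queued */
};

/* Mirrors the new check in napi_schedule_rps(): only raise the softirq
 * when no polling context will flush the RPS IPI list on its own. */
static void schedule_rps(struct fake_softnet *sd)
{
	sd->rps_ipi_pending = true;
	if (!sd->in_net_rx_action && !sd->in_napi_threaded_poll)
		puts("raise NET_RX_SOFTIRQ");
}

/* Mirrors the napi_threaded_poll() change: bracket the poll with the
 * flag, then flush pending RPS work directly instead of via softirq. */
static void threaded_poll_once(struct fake_softnet *sd)
{
	sd->in_napi_threaded_poll = true;
	schedule_rps(sd);	/* stands in for __napi_poll() queueing RPS work */
	sd->in_napi_threaded_poll = false;

	if (sd->rps_ipi_pending) {
		sd->rps_ipi_pending = false;
		puts("send RPS IPIs directly from the poller");
	}
}

int main(void)
{
	struct fake_softnet sd = { 0 };

	threaded_poll_once(&sd);	/* no softirq raised */
	schedule_rps(&sd);		/* outside any poller: softirq raised */
	return 0;
}

The same pattern already exists for net_rx_action() via the in_net_rx_action flag; this patch extends it to the threaded poller.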
Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 7d9ec23f97c6..735096d42c1d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4603,10 +4603,10 @@ static void napi_schedule_rps(struct softnet_data *sd)
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;
 
-		/* If not called from net_rx_action()
+		/* If not called from net_rx_action() or napi_threaded_poll()
 		 * we have to raise NET_RX_SOFTIRQ.
 		 */
-		if (!mysd->in_net_rx_action)
+		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 		return;
 	}
@@ -6631,11 +6631,19 @@ static int napi_threaded_poll(void *data)
 		local_bh_disable();
 		sd = this_cpu_ptr(&softnet_data);
+		sd->in_napi_threaded_poll = true;
 
 		have = netpoll_poll_lock(napi);
 		__napi_poll(napi, &repoll);
 		netpoll_poll_unlock(have);
 
+		sd->in_napi_threaded_poll = false;
+		barrier();
+
+		if (sd_has_rps_ipi_waiting(sd)) {
+			local_irq_disable();
+			net_rps_action_and_irq_enable(sd);
+		}
 		skb_defer_free_flush(sd);
 		local_bh_enable();
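barrier() here is a compiler barrier: clearing in_napi_threaded_poll before the sd_has_rps_ipi_waiting() check means any RPS work queued by an interrupt in between is either flushed directly via net_rps_action_and_irq_enable() or raises NET_RX_SOFTIRQ on its own, so no pending IPI is left stranded.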