author	Alexander Lobakin <aleksander.lobakin@intel.com>	2025-06-12 18:02:24 +0200
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2025-06-16 11:40:14 -0700
commit	c4ba6a9b9d460c6fd742e118022f2808ec3c4223 (patch)
tree	b3592e407f4118a922f2585cdf3b1134f45a74e4 /drivers/net/ethernet/intel/libeth/xdp.c
parent	26ce8eb0bb7d47c5fb36f7c12f34e4a320f14cac (diff)
libeth: xdp: add XDPSQ locking helpers
Unfortunately, it's not always possible to allocate max(num_rxqs, nr_cpu_ids) XDPSQs, even on high-end NICs. To mitigate this, add simple locking helpers to libeth_xdp. As long as XDPSQs are not shared, the whole functionality is gated behind a static branch. Otherwise, each bulk flush locks the queue for the time of cleaning and filling the descriptors.

As long as a particular queue is not used by more than one CPU, the impact is minimal (a runtime boolean check twice per 16+ descriptors).

Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> # static key
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
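[For illustration, a minimal sketch of how a driver might wire these helpers in. The my_xdpsq struct and my_xdpsq_*() callbacks are hypothetical, as are the inline libeth_xdpsq_lock()/libeth_xdpsq_unlock() wrappers (sketched after the diff below); only __libeth_xdpsq_get()/__libeth_xdpsq_put() and struct libeth_xdpsq_lock's ->share flag are visible in this patch.]

	/*
	 * Hypothetical driver glue: my_xdpsq and the callbacks below are
	 * illustrative, not part of libeth.
	 */
	#include <linux/cpumask.h>
	#include <linux/netdevice.h>

	#include <net/libeth/xdp.h>

	struct my_xdpsq {
		struct libeth_xdpsq_lock lock;	/* per-queue lock state */
		/* ... descriptor ring, producer index, etc. ... */
	};

	/* ifup: enable sharing only when the XDPSQs are outnumbered by CPUs */
	static void my_xdpsq_init(struct my_xdpsq *sq,
				  const struct net_device *dev,
				  u32 num_xdpsqs)
	{
		if (num_xdpsqs < nr_cpu_ids)
			__libeth_xdpsq_get(&sq->lock, dev);
	}

	/* ifdown: drop the static-key reference taken above, if any */
	static void my_xdpsq_deinit(struct my_xdpsq *sq,
				    const struct net_device *dev)
	{
		if (sq->lock.share)
			__libeth_xdpsq_put(&sq->lock, dev);
	}

	/* bulk flush: hold the queue only while cleaning/filling descriptors */
	static void my_xdpsq_flush(struct my_xdpsq *sq)
	{
		libeth_xdpsq_lock(&sq->lock);

		/* ... clean completed descriptors, fill the new ones ... */

		libeth_xdpsq_unlock(&sq->lock);
	}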
Diffstat (limited to 'drivers/net/ethernet/intel/libeth/xdp.c')
-rw-r--r--	drivers/net/ethernet/intel/libeth/xdp.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c
index c29a1a0dfc57..0f08dd405190 100644
--- a/drivers/net/ethernet/intel/libeth/xdp.c
+++ b/drivers/net/ethernet/intel/libeth/xdp.c
@@ -9,6 +9,53 @@
#include "priv.h"
+/* XDPSQ sharing */
+
+DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ bool warn;
+
+ spin_lock_init(&lock->lock);
+ lock->share = true;
+
+ warn = !static_key_enabled(&libeth_xdpsq_share);
+ static_branch_inc(&libeth_xdpsq_share);
+
+ if (warn && net_ratelimit())
+ netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
+
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ static_branch_dec(&libeth_xdpsq_share);
+
+ if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
+ netdev_notice(dev, "XDPSQ sharing disabled\n");
+
+ lock->share = false;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
+
+void __acquires(&lock->lock)
+__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ spin_lock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
+
+void __releases(&lock->lock)
+__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
+
/* ``XDP_TX`` bulking */
static void __cold
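
[The exported functions in this diff form the slow path. A minimal sketch of the inline fast-path wrappers the flush path presumably goes through, assuming the libeth XDP header exposes the static key and the ->share flag set in __libeth_xdpsq_get(); treat the exact names and placement as assumptions, since the header is not part of this diff:]

	/* Sketch: inline fast path, assumed to live in the libeth XDP header */
	static inline void libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
	{
		/*
		 * With no shared queues, the disabled static branch makes
		 * this a fall-through NOP; otherwise, ->share still keeps
		 * unshared queues from taking the spinlock.
		 */
		if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
			__libeth_xdpsq_lock(lock);
	}

	static inline void libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
	{
		if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
			__libeth_xdpsq_unlock(lock);
	}

[This would match the commit message's "boolean check twice per 16+ descriptors": one check when taking the lock and one when releasing it around each bulk flush.]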