-rw-r--r-- | Documentation/networking/scaling.rst | 6
-rw-r--r-- | drivers/net/ethernet/amazon/ena/ena_netdev.c | 43
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice.h | 3
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_arfs.c | 33
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_arfs.h | 2
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_base.c | 7
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c | 16
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c | 47
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_lib.c | 1
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_txrx.c | 22
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_txrx.h | 6
-rw-r--r-- | include/linux/cpu_rmap.h | 1
-rw-r--r-- | include/linux/netdevice.h | 24
-rw-r--r-- | lib/cpu_rmap.c | 2
-rw-r--r-- | net/core/dev.c | 169
-rw-r--r-- | tools/testing/selftests/drivers/net/hw/Makefile | 4
-rwxr-xr-x | tools/testing/selftests/drivers/net/hw/irq.py | 99
-rw-r--r-- | tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c | 13
-rw-r--r-- | tools/testing/selftests/drivers/net/lib/py/env.py | 8
19 files changed, 343 insertions, 163 deletions
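
The diff below converts the ena, ice and idpf drivers to let the networking core own the ARFS reverse map and the per-queue IRQ affinity. As a rough orientation before reading the per-driver hunks, the driver-facing pattern boils down to the sketch below. It is illustrative only: the adapter/queue structures, the helper name and the one-MSI-X-vector-per-queue layout are hypothetical, and only the netif_enable_cpu_rmap(), netif_set_affinity_auto(), netif_napi_add_config() and netif_napi_set_irq() calls come from this change.

static int my_setup_queue_vectors(struct my_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        unsigned int i;
        int err;

        /* Let the core allocate and maintain netdev->rx_cpu_rmap
         * (replaces open-coded alloc_irq_cpu_rmap()/irq_cpu_rmap_add()).
         */
        err = netif_enable_cpu_rmap(netdev, adapter->num_queues);
        if (err)
                return err;

        /* Let the core store and re-apply IRQ affinity across resets
         * (replaces irq_update_affinity_hint() and the driver-private
         * affinity notifier).
         */
        netif_set_affinity_auto(netdev);

        for (i = 0; i < adapter->num_queues; i++) {
                struct my_queue *q = &adapter->queues[i];

                /* A persistent per-index NAPI config is required so the
                 * affinity mask survives queue reconfiguration.
                 */
                netif_napi_add_config(netdev, &q->napi, my_napi_poll, i);

                /* Binding the IRQ registers the core's affinity notifier
                 * and adds the vector to the rmap.
                 */
                netif_napi_set_irq(&q->napi,
                                   pci_irq_vector(adapter->pdev, i));
        }

        return 0;
}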
diff --git a/Documentation/networking/scaling.rst b/Documentation/networking/scaling.rst index 6984700cec6a..99b6a61e5e31 100644 --- a/Documentation/networking/scaling.rst +++ b/Documentation/networking/scaling.rst @@ -434,8 +434,10 @@ rps_dev_flow_table. The stack consults a CPU to hardware queue map which is maintained by the NIC driver. This is an auto-generated reverse map of the IRQ affinity table shown by /proc/interrupts. Drivers can use functions in the cpu_rmap (“CPU affinity reverse map”) kernel library -to populate the map. For each CPU, the corresponding queue in the map is -set to be one whose processing CPU is closest in cache locality. +to populate the map. Alternatively, drivers can delegate the cpu_rmap +management to the Kernel by calling netif_enable_cpu_rmap(). For each CPU, +the corresponding queue in the map is set to be one whose processing CPU is +closest in cache locality. Accelerated RFS Configuration diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c1295dfad0d0..6aab85a7c60a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -5,9 +5,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#ifdef CONFIG_RFS_ACCEL -#include <linux/cpu_rmap.h> -#endif /* CONFIG_RFS_ACCEL */ #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> @@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter, return 0; } -static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) -{ -#ifdef CONFIG_RFS_ACCEL - u32 i; - int rc; - - adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); - if (!adapter->netdev->rx_cpu_rmap) - return -ENOMEM; - for (i = 0; i < adapter->num_io_queues; i++) { - int irq_idx = ENA_IO_IRQ_IDX(i); - - rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, - pci_irq_vector(adapter->pdev, irq_idx)); - if (rc) { - free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); - adapter->netdev->rx_cpu_rmap = NULL; - return rc; - } - } -#endif /* CONFIG_RFS_ACCEL */ - return 0; -} - static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, u16 qid) { @@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter) adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; } - if (ena_init_rx_cpu_rmap(adapter)) + if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues)) netif_warn(adapter, probe, adapter->netdev, "Failed to map IRQs to CPUs\n"); @@ -1742,13 +1715,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter) struct ena_irq *irq; int i; -#ifdef CONFIG_RFS_ACCEL - if (adapter->msix_vecs >= 1) { - free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); - adapter->netdev->rx_cpu_rmap = NULL; - } -#endif /* CONFIG_RFS_ACCEL */ - for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; irq_set_affinity_hint(irq->vector, NULL); @@ -4131,13 +4097,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) ena_dev = adapter->ena_dev; netdev = adapter->netdev; -#ifdef CONFIG_RFS_ACCEL - if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { - free_irq_cpu_rmap(netdev->rx_cpu_rmap); - netdev->rx_cpu_rmap = NULL; - } - -#endif /* CONFIG_RFS_ACCEL */ /* Make sure timer and reset routine won't be called after * freeing device resources. 
*/ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index c9104b13e1d2..5ceeae664598 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -475,9 +475,6 @@ struct ice_q_vector { struct ice_ring_container rx; struct ice_ring_container tx; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; - struct ice_channel *ch; char name[ICE_INT_NAME_STR_LEN]; diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 7cee365cc7d1..171cdec741c2 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -571,25 +571,6 @@ void ice_clear_arfs(struct ice_vsi *vsi) } /** - * ice_free_cpu_rx_rmap - free setup CPU reverse map - * @vsi: the VSI to be forwarded to - */ -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) -{ - struct net_device *netdev; - - if (!vsi || vsi->type != ICE_VSI_PF) - return; - - netdev = vsi->netdev; - if (!netdev || !netdev->rx_cpu_rmap) - return; - - free_irq_cpu_rmap(netdev->rx_cpu_rmap); - netdev->rx_cpu_rmap = NULL; -} - -/** * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue * @vsi: the VSI to be forwarded to */ @@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; struct ice_pf *pf; - int i; if (!vsi || vsi->type != ICE_VSI_PF) return 0; @@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", vsi->type, netdev->name, vsi->num_q_vectors); - netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors); - if (unlikely(!netdev->rx_cpu_rmap)) - return -EINVAL; - - ice_for_each_q_vector(vsi, i) - if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - vsi->q_vectors[i]->irq.virq)) { - ice_free_cpu_rx_rmap(vsi); - return -EINVAL; - } - - return 0; + return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h index 9669ad9bf7b5..9706293128c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.h +++ b/drivers/net/ethernet/intel/ice/ice_arfs.h @@ -45,7 +45,6 @@ int ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, u16 rxq_idx, u32 flow_id); void ice_clear_arfs(struct ice_vsi *vsi); -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); void ice_init_arfs(struct ice_vsi *vsi); void ice_sync_arfs_fltrs(struct ice_pf *pf); int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); @@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type); #else static inline void ice_clear_arfs(struct ice_vsi *vsi) { } -static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { } static inline void ice_init_arfs(struct ice_vsi *vsi) { } static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } static inline void ice_remove_arfs(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index b3234a55a253..6db4ad8fc70b 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -147,10 +147,6 @@ skip_alloc: q_vector->reg_idx = q_vector->irq.index; q_vector->vf_reg_idx = q_vector->irq.index; - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); - /* This will not be called in the driver load path because the netdev * will not be created yet. 
All other cases with register the NAPI * handler here (i.e. resume, reset/rebuild, etc.) @@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring) if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state)) return; - netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, + netif_set_xps_queue(ring->netdev, + &ring->q_vector->napi.config->affinity_mask, ring->q_index); } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7f5b229cab05..715efd8a359f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2592,7 +2592,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; - ice_free_cpu_rx_rmap(vsi); ice_for_each_q_vector(vsi, i) { int irq_num; @@ -2605,12 +2604,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) vsi->q_vectors[i]->num_ring_rx)) continue; - /* clear the affinity notifier in the IRQ descriptor */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - - /* clear the affinity_hint in the IRQ descriptor */ - irq_update_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } @@ -2765,11 +2758,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { struct net_device *netdev = vsi->netdev; - int q_idx; + int q_idx, v_idx; if (!netdev) return; + /* Clear the NAPI's interrupt number */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + + netif_napi_set_irq(&q_vector->napi, -1); + } + ice_for_each_txq(vsi, q_idx) netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b084839eb811..eff4afabeef6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2528,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) } /** - * ice_irq_affinity_notify - Callback for affinity changes - * @notify: context as to what irq was changed - * @mask: the new affinity mask - * - * This is a callback function used by the irq_set_affinity_notifier function - * so that we may register to receive changes to the irq affinity masks. - */ -static void -ice_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct ice_q_vector *q_vector = - container_of(notify, struct ice_q_vector, affinity_notify); - - cpumask_copy(&q_vector->affinity_mask, mask); -} - -/** - * ice_irq_affinity_release - Callback for affinity notifier release - * @ref: internal core kernel usage - * - * This is a callback function used by the irq_set_affinity_notifier function - * to inform the current notification subscriber that they will no longer - * receive notifications. 
- */ -static void ice_irq_affinity_release(struct kref __always_unused *ref) {} - -/** * ice_vsi_ena_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured */ @@ -2618,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) err); goto free_q_irqs; } - - /* register for affinity change notifications */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { - struct irq_affinity_notify *affinity_notify; - - affinity_notify = &q_vector->affinity_notify; - affinity_notify->notify = ice_irq_affinity_notify; - affinity_notify->release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, affinity_notify); - } - - /* assign the mask for this irq */ - irq_update_affinity_hint(irq_num, &q_vector->affinity_mask); } err = ice_set_cpu_rx_rmap(vsi); @@ -2646,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector--) { irq_num = vsi->q_vectors[vector]->irq.virq; - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - irq_update_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; @@ -3675,6 +3631,9 @@ void ice_set_netdev_features(struct net_device *netdev) */ netdev->hw_features |= NETIF_F_RXFCS; + /* Allow core to manage IRQs affinity */ + netif_set_affinity_auto(netdev); + netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index a3d6b8f198a8..f3aea7bcdaa3 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -814,6 +814,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) netdev->hw_features |= dflt_features | offloads; netdev->hw_enc_features |= dflt_features | offloads; idpf_set_ethtool_ops(netdev); + netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); /* carrier off on init to avoid Tx hangs */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 2747dc69999a..672dfad1fb21 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3551,8 +3551,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) q_vector->tx = NULL; kfree(q_vector->rx); q_vector->rx = NULL; - - free_cpumask_var(q_vector->affinity_mask); } kfree(vport->q_vectors); @@ -3579,8 +3577,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); kfree(free_irq(irq_num, q_vector)); } } @@ -3768,8 +3764,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport) "Request_irq failed, error: %d\n", err); goto free_q_irqs; } - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, q_vector->affinity_mask); } return 0; @@ -4181,7 +4175,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport) static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) { int (*napi_poll)(struct napi_struct *napi, int budget); - u16 v_idx; + u16 v_idx, qv_idx; + int irq_num; if (idpf_is_queue_model_split(vport->txq_model)) napi_poll = idpf_vport_splitq_napi_poll; @@ -4190,12 +4185,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + qv_idx = 
vport->q_vector_idxs[v_idx]; + irq_num = vport->adapter->msix_entries[qv_idx].vector; - netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); - - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, q_vector->affinity_mask); + netif_napi_add_config(vport->netdev, &q_vector->napi, + napi_poll, v_idx); + netif_napi_set_irq(&q_vector->napi, irq_num); } } @@ -4239,9 +4234,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; - if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) - goto error; - q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), GFP_KERNEL); if (!q_vector->tx) diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index cd9a20c9cc7f..b029f566e57c 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -382,7 +382,6 @@ struct idpf_intr_reg { * @rx_intr_mode: Dynamic ITR or not * @rx_itr_idx: RX ITR index * @v_idx: Vector index - * @affinity_mask: CPU affinity mask */ struct idpf_q_vector { __cacheline_group_begin_aligned(read_mostly); @@ -419,13 +418,12 @@ struct idpf_q_vector { __cacheline_group_begin_aligned(cold); u16 v_idx; - cpumask_var_t affinity_mask; __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_q_vector, 120, 24 + sizeof(struct napi_struct) + 2 * sizeof(struct dim), - 8 + sizeof(cpumask_var_t)); + 8); struct idpf_rx_queue_stats { u64_stats_t packets; @@ -921,7 +919,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) if (!q_vector) return NUMA_NO_NODE; - cpu = cpumask_first(q_vector->affinity_mask); + cpu = cpumask_first(&q_vector->napi.config->affinity_mask); return cpu < nr_cpu_ids ? 
cpu_to_mem(cpu) : NUMA_NO_NODE; } diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index 20b5729903d7..2fd7ba75362a 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -32,6 +32,7 @@ struct cpu_rmap { #define CPU_RMAP_DIST_INF 0xffff extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); +extern void cpu_rmap_get(struct cpu_rmap *rmap); extern int cpu_rmap_put(struct cpu_rmap *rmap); extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9a387d456592..2094d3edda73 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -352,6 +352,7 @@ struct napi_config { u64 gro_flush_timeout; u64 irq_suspend_timeout; u32 defer_hard_irqs; + cpumask_t affinity_mask; unsigned int napi_id; }; @@ -394,6 +395,8 @@ struct napi_struct { struct list_head dev_list; struct hlist_node napi_hash_node; int irq; + struct irq_affinity_notify notify; + int napi_rmap_idx; int index; struct napi_config *config; }; @@ -409,6 +412,7 @@ enum { NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ + NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */ }; enum { @@ -422,6 +426,7 @@ enum { NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), + NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER), }; enum gro_result { @@ -1989,6 +1994,15 @@ enum netdev_reg_state { * * @threaded: napi threaded mode is enabled * + * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ + * affinity. Set by netif_enable_irq_affinity(), then + * the driver must create a persistent napi by + * netif_napi_add_config() and finally bind the napi to + * IRQ (via netif_napi_set_irq()). + * + * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap. + * Set by calling netif_enable_cpu_rmap(). + * * @see_all_hwtstamp_requests: device wants to see calls to * ndo_hwtstamp_set() for all timestamp requests * regardless of source, even if those aren't @@ -2396,6 +2410,8 @@ struct net_device { struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; + bool irq_affinity_auto; + bool rx_cpu_rmap_auto; /* priv_flags_slow, ungrouped to save space */ unsigned long see_all_hwtstamp_requests:1; @@ -2724,10 +2740,7 @@ static inline void netdev_assert_locked_or_invisible(struct net_device *dev) netdev_assert_locked(dev); } -static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) -{ - napi->irq = irq; -} +void netif_napi_set_irq_locked(struct napi_struct *napi, int irq); static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) { @@ -2865,6 +2878,9 @@ static inline void netif_napi_del(struct napi_struct *napi) synchronize_net(); } +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs); +void netif_set_affinity_auto(struct net_device *dev); + struct packet_type { __be16 type; /* This is really htons(ether_type). 
*/ bool ignore_outgoing; diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 4c348670da31..f03d9be3f06b 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -73,7 +73,7 @@ static void cpu_rmap_release(struct kref *ref) * cpu_rmap_get - internal helper to get new ref on a cpu_rmap * @rmap: reverse-map allocated with alloc_cpu_rmap() */ -static inline void cpu_rmap_get(struct cpu_rmap *rmap) +void cpu_rmap_get(struct cpu_rmap *rmap) { kref_get(&rmap->refcount); } diff --git a/net/core/dev.c b/net/core/dev.c index 3f525278a871..b0ab0169f507 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6943,11 +6943,175 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, } EXPORT_SYMBOL(netif_queue_set_napi); +static void +netif_napi_irq_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct napi_struct *napi = + container_of(notify, struct napi_struct, notify); +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; + int err; +#endif + + if (napi->config && napi->dev->irq_affinity_auto) + cpumask_copy(&napi->config->affinity_mask, mask); + +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); + if (err) + netdev_warn(napi->dev, "RMAP update failed (%d)\n", + err); + } +#endif +} + +#ifdef CONFIG_RFS_ACCEL +static void netif_napi_affinity_release(struct kref *ref) +{ + struct napi_struct *napi = + container_of(ref, struct napi_struct, notify.kref); + struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; + + netdev_assert_locked(napi->dev); + WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, + &napi->state)); + + if (!napi->dev->rx_cpu_rmap_auto) + return; + rmap->obj[napi->napi_rmap_idx] = NULL; + napi->napi_rmap_idx = -1; + cpu_rmap_put(rmap); +} + +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) +{ + if (dev->rx_cpu_rmap_auto) + return 0; + + dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs); + if (!dev->rx_cpu_rmap) + return -ENOMEM; + + dev->rx_cpu_rmap_auto = true; + return 0; +} +EXPORT_SYMBOL(netif_enable_cpu_rmap); + +static void netif_del_cpu_rmap(struct net_device *dev) +{ + struct cpu_rmap *rmap = dev->rx_cpu_rmap; + + if (!dev->rx_cpu_rmap_auto) + return; + + /* Free the rmap */ + cpu_rmap_put(rmap); + dev->rx_cpu_rmap = NULL; + dev->rx_cpu_rmap_auto = false; +} + +#else +static void netif_napi_affinity_release(struct kref *ref) +{ +} + +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) +{ + return 0; +} +EXPORT_SYMBOL(netif_enable_cpu_rmap); + +static void netif_del_cpu_rmap(struct net_device *dev) +{ +} +#endif + +void netif_set_affinity_auto(struct net_device *dev) +{ + unsigned int i, maxqs, numa; + + maxqs = max(dev->num_tx_queues, dev->num_rx_queues); + numa = dev_to_node(&dev->dev); + + for (i = 0; i < maxqs; i++) + cpumask_set_cpu(cpumask_local_spread(i, numa), + &dev->napi_config[i].affinity_mask); + + dev->irq_affinity_auto = true; +} +EXPORT_SYMBOL(netif_set_affinity_auto); + +void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) +{ + int rc; + + netdev_assert_locked_or_invisible(napi->dev); + + if (napi->irq == irq) + return; + + /* Remove existing resources */ + if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) + irq_set_affinity_notifier(napi->irq, NULL); + + napi->irq = irq; + if (irq < 0 || + (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) + return; + + /* Abort for buggy drivers */ + if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) + return; 
+ +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); + if (rc < 0) + return; + + cpu_rmap_get(napi->dev->rx_cpu_rmap); + napi->napi_rmap_idx = rc; + } +#endif + + /* Use core IRQ notifier */ + napi->notify.notify = netif_napi_irq_notify; + napi->notify.release = netif_napi_affinity_release; + rc = irq_set_affinity_notifier(irq, &napi->notify); + if (rc) { + netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", + rc); + goto put_rmap; + } + + set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); + return; + +put_rmap: +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + cpu_rmap_put(napi->dev->rx_cpu_rmap); + napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; + napi->napi_rmap_idx = -1; + } +#endif + napi->notify.notify = NULL; + napi->notify.release = NULL; +} +EXPORT_SYMBOL(netif_napi_set_irq_locked); + static void napi_restore_config(struct napi_struct *n) { n->defer_hard_irqs = n->config->defer_hard_irqs; n->gro_flush_timeout = n->config->gro_flush_timeout; n->irq_suspend_timeout = n->config->irq_suspend_timeout; + + if (n->dev->irq_affinity_auto && + test_bit(NAPI_STATE_HAS_NOTIFIER, &n->state)) + irq_set_affinity(n->irq, &n->config->affinity_mask); + /* a NAPI ID might be stored in the config, if so use it. if not, use * napi_hash_add to generate one for us. */ @@ -7168,6 +7332,9 @@ void __netif_napi_del_locked(struct napi_struct *napi) /* Make sure NAPI is disabled (or was never enabled). */ WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); + if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) + irq_set_affinity_notifier(napi->irq, NULL); + if (napi->config) { napi->index = -1; napi->config = NULL; @@ -11720,6 +11887,8 @@ void free_netdev(struct net_device *dev) netdev_napi_exit(dev); + netif_del_cpu_rmap(dev); + ref_tracker_dir_exit(&dev->refcnt_tracker); #ifdef CONFIG_PCPU_DEV_REFCNT free_percpu(dev->pcpu_refcnt); diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile index b2be14d0cc56..cde5814ff9a7 100644 --- a/tools/testing/selftests/drivers/net/hw/Makefile +++ b/tools/testing/selftests/drivers/net/hw/Makefile @@ -10,6 +10,7 @@ TEST_PROGS = \ ethtool_rmon.sh \ hw_stats_l3.sh \ hw_stats_l3_gre.sh \ + irq.py \ loopback.sh \ nic_link_layer.py \ nic_performance.py \ @@ -34,9 +35,12 @@ TEST_INCLUDES := \ # YNL files, must be before "include ..lib.mk" YNL_GEN_FILES := ncdevmem TEST_GEN_FILES += $(YNL_GEN_FILES) +TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c)) include ../../../lib.mk # YNL build YNL_GENS := ethtool netdev include ../../../net/ynl.mk + +include ../../../net/bpf.mk diff --git a/tools/testing/selftests/drivers/net/hw/irq.py b/tools/testing/selftests/drivers/net/hw/irq.py new file mode 100755 index 000000000000..42ab98370245 --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/irq.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +from lib.py import ksft_run, ksft_exit +from lib.py import ksft_ge, ksft_eq +from lib.py import KsftSkipEx +from lib.py import ksft_disruptive +from lib.py import EthtoolFamily, NetdevFamily +from lib.py import NetDrvEnv +from lib.py import cmd, ip, defer + + +def read_affinity(irq) -> str: + with open(f'/proc/irq/{irq}/smp_affinity', 'r') as fp: + return fp.read().lstrip("0,").strip() + + +def write_affinity(irq, what) -> str: + if what != read_affinity(irq): + with open(f'/proc/irq/{irq}/smp_affinity', 'w') as fp: + fp.write(what) + + +def 
check_irqs_reported(cfg) -> None: + """ Check that device reports IRQs for NAPI instances """ + napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True) + irqs = sum(['irq' in x for x in napis]) + + ksft_ge(irqs, 1) + ksft_eq(irqs, len(napis)) + + +def _check_reconfig(cfg, reconfig_cb) -> None: + napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True) + for n in reversed(napis): + if 'irq' in n: + break + else: + raise KsftSkipEx(f"Device has no NAPI with IRQ attribute (#napis: {len(napis)}") + + old = read_affinity(n['irq']) + # pick an affinity that's not the current one + new = "3" if old != "3" else "5" + write_affinity(n['irq'], new) + defer(write_affinity, n['irq'], old) + + reconfig_cb(cfg) + + ksft_eq(read_affinity(n['irq']), new, comment="IRQ affinity changed after reconfig") + + +def check_reconfig_queues(cfg) -> None: + def reconfig(cfg) -> None: + channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}}) + if channels['combined-count'] == 0: + rx_type = 'rx' + else: + rx_type = 'combined' + cur_queue_cnt = channels[f'{rx_type}-count'] + max_queue_cnt = channels[f'{rx_type}-max'] + + cmd(f"ethtool -L {cfg.ifname} {rx_type} 1") + cmd(f"ethtool -L {cfg.ifname} {rx_type} {max_queue_cnt}") + cmd(f"ethtool -L {cfg.ifname} {rx_type} {cur_queue_cnt}") + + _check_reconfig(cfg, reconfig) + + +def check_reconfig_xdp(cfg) -> None: + def reconfig(cfg) -> None: + ip(f"link set dev %s xdp obj %s sec xdp" % + (cfg.ifname, cfg.rpath("xdp_dummy.bpf.o"))) + ip(f"link set dev %s xdp off" % cfg.ifname) + + _check_reconfig(cfg, reconfig) + + +@ksft_disruptive +def check_down(cfg) -> None: + def reconfig(cfg) -> None: + ip("link set dev %s down" % cfg.ifname) + ip("link set dev %s up" % cfg.ifname) + + _check_reconfig(cfg, reconfig) + + +def main() -> None: + with NetDrvEnv(__file__, nsim_test=False) as cfg: + cfg.ethnl = EthtoolFamily() + cfg.netnl = NetdevFamily() + + ksft_run([check_irqs_reported, check_reconfig_queues, + check_reconfig_xdp, check_down], + args=(cfg, )) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c b/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c new file mode 100644 index 000000000000..d988b2e0cee8 --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define KBUILD_MODNAME "xdp_dummy" +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +SEC("xdp") +int xdp_dummy_prog(struct xdp_md *ctx) +{ + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py index 96b33b5ef9dd..fd4d674e6c72 100644 --- a/tools/testing/selftests/drivers/net/lib/py/env.py +++ b/tools/testing/selftests/drivers/net/lib/py/env.py @@ -58,14 +58,20 @@ class NetDrvEnv(NetDrvEnvBase): """ Class for a single NIC / host env, with no remote end """ - def __init__(self, src_path, **kwargs): + def __init__(self, src_path, nsim_test=None, **kwargs): super().__init__(src_path) self._ns = None if 'NETIF' in self.env: + if nsim_test is True: + raise KsftXfailEx("Test only works on netdevsim") + self.dev = ip("-d link show dev " + self.env['NETIF'], json=True)[0] else: + if nsim_test is False: + raise KsftXfailEx("Test does not work on netdevsim") + self._ns = NetdevSimDev(**kwargs) self.dev = self._ns.nsims[0].dev self.ifname = self.dev['ifname'] |
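
For completeness, the converted drivers no longer keep a private copy of the affinity mask; they read the mask the core maintains in the persistent NAPI config, as the ice XPS hunk and the idpf NUMA-node helper above do. A minimal sketch of that consumer side follows (the ring and vector structure names are hypothetical; only the napi.config->affinity_mask field is defined by this change).

static void my_cfg_xps_tx_ring(struct my_tx_ring *ring)
{
        /* The core keeps napi.config->affinity_mask in sync with the
         * actual IRQ affinity via its notifier once
         * netif_set_affinity_auto() has been called.
         */
        netif_set_xps_queue(ring->netdev,
                            &ring->q_vector->napi.config->affinity_mask,
                            ring->q_index);
}

static int my_q_vector_to_mem(const struct my_q_vector *q_vector)
{
        int cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

        return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}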