author    Saeed Mahameed <saeedm@mellanox.com>    2016-12-06 13:53:49 +0200
committer Saeed Mahameed <saeedm@mellanox.com>    2017-02-06 18:20:17 +0200
commit    a6f402e4990145252ce4fde59b273fa7e4f91e1b (patch)
tree      e9629cd82413793b5a8c12d736cdeaa44f4e16cc /drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
parent    2b31f7ae5f645edd852addfca445895b5806f3f9 (diff)
net/mlx5e: Tx, no inline copy on ConnectX-5
ConnectX-5 and later HW generations will report min inline mode ==
MLX5_INLINE_MODE_NONE, which means the driver is not required to copy
packet headers to the inline fields of the TX WQE.

When inline is not required, vlan insertion will be handled in the TX
descriptor rather than by an inline copy. For the LSO case the driver
is still required to copy the headers, for the HW to duplicate them on
the wire.

This will improve CPU utilization and boost TX performance.

Tested with pktgen burst single flow:
CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
HCA: Mellanox Technologies MT28800 Family [ConnectX-5 Ex]

Before: 15.1 Mpps
After:  17.2 Mpps
Improvement: 14%

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
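To see what the inline copy costs in descriptor space, here is a minimal
standalone sketch of the DIV_ROUND_UP accounting the patch moves under the
new if (ihs) branch. The 16-byte MLX5_SEND_WQE_DS segment size and the
2-byte inline_hdr.start are assumptions based on the mlx5 definitions of
this era; the header lengths are illustrative.

#include <stdio.h>

/* Assumed constants mirroring the mlx5 defines: each WQE data
 * segment is 16 bytes, and the ethernet segment embeds the first
 * 2 header bytes in inline_hdr.start. */
#define MLX5_SEND_WQE_DS	16
#define INLINE_HDR_START_SZ	2

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative inline header sizes: 0 bytes for ConnectX-5
	 * (MLX5_INLINE_MODE_NONE) vs. ETH + IPv4 + TCP = 54 bytes. */
	unsigned int sizes[] = { 0, 54 };

	for (int i = 0; i < 2; i++) {
		unsigned int ihs = sizes[i];
		unsigned int extra_ds = ihs ?
			DIV_ROUND_UP(ihs - INLINE_HDR_START_SZ,
				     MLX5_SEND_WQE_DS) : 0;

		printf("ihs=%2u -> %u extra data segment(s) in the WQE\n",
		       ihs, extra_ds);
	}
	return 0;
}

Under these assumptions a 54-byte TCP/IPv4 header set consumes 4 extra
16-byte segments per WQE, while the ConnectX-5 no-inline path consumes none.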
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_tx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 678c07c8fbb0..f193128bac4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	int hlen;
 
 	switch (mode) {
+	case MLX5_INLINE_MODE_NONE:
+		return 0;
 	case MLX5_INLINE_MODE_TCP_UDP:
 		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
 		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
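As a plain-C illustration of the hunk above, here is a hypothetical
standalone mock (not the driver's types) of how the new
MLX5_INLINE_MODE_NONE case short-circuits the calculation, so the caller
sees ihs == 0 and skips the inline copy entirely:

/* Hypothetical mock of the dispatch above; the real function also
 * handles MLX5_INLINE_MODE_IP and the L2 default. */
enum mock_inline_mode {
	MOCK_INLINE_MODE_NONE,
	MOCK_INLINE_MODE_L2,
	MOCK_INLINE_MODE_TCP_UDP,
};

static unsigned int mock_calc_min_inline(enum mock_inline_mode mode,
					 unsigned int l4_hlen)
{
	switch (mode) {
	case MOCK_INLINE_MODE_NONE:
		return 0;		/* ConnectX-5: nothing to copy */
	case MOCK_INLINE_MODE_TCP_UDP:
		return l4_hlen;		/* stands in for eth_get_headlen() */
	default:
		return 14;		/* L2 mode: ethernet header only */
	}
}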
@@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
 	wi->num_bytes = num_bytes;
 
-	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data,
-				  &skb_len);
-		ihs += VLAN_HLEN;
-	} else {
-		memcpy(eseg->inline_hdr.start, skb_data, ihs);
-		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+	if (ihs) {
+		if (skb_vlan_tag_present(skb)) {
+			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+			ihs += VLAN_HLEN;
+		} else {
+			memcpy(eseg->inline_hdr.start, skb_data, ihs);
+			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+		}
+		eseg->inline_hdr.sz = cpu_to_be16(ihs);
+		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+	} else if (skb_vlan_tag_present(skb)) {
+		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
 	}
 
-	eseg->inline_hdr.sz = cpu_to_be16(ihs);
-
-	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start),
-			       MLX5_SEND_WQE_DS);
-	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
 
 	wi->num_dma = 0;
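To make the control flow of this hunk concrete, here is a hedged
standalone sketch (mock struct, not the real mlx5 eseg layout) of the
decision it implements: with a non-zero ihs the headers are copied into
the WQE's inline area, while with ihs == 0 and a vlan tag present the tag
is instead handed to the hardware via the descriptor's insert fields.

#include <stdint.h>
#include <string.h>

/* Mock of the two vlan paths; field names echo the diff, but this
 * struct layout is illustrative only. */
struct mock_eth_seg {
	uint16_t inline_sz;	/* stands in for eseg->inline_hdr.sz */
	uint8_t  inline_buf[256];
	uint16_t insert_type;	/* stands in for eseg->insert.type */
	uint16_t insert_tci;	/* stands in for eseg->insert.vlan_tci */
};

#define MOCK_INSERT_VLAN 0x1	/* placeholder for MLX5_ETH_WQE_INSERT_VLAN */

static void mock_build_eseg(struct mock_eth_seg *eseg,
			    const uint8_t *hdrs, uint16_t ihs,
			    int has_vlan, uint16_t vlan_tci)
{
	if (ihs) {
		/* pre-ConnectX-5 path: copy headers into the WQE (the
		 * driver rewrites a vlan tag into the inline buffer) */
		memcpy(eseg->inline_buf, hdrs, ihs);
		eseg->inline_sz = ihs;
	} else if (has_vlan) {
		/* ConnectX-5 path: no copy; HW inserts the tag itself */
		eseg->insert_type = MOCK_INSERT_VLAN;
		eseg->insert_tci  = vlan_tci;
	}
}

The else-if mirrors the patch's key design choice: vlan handling moves
from a CPU-side memcpy into a field the NIC consumes, which is where the
reported 14% pktgen improvement comes from.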