Diffstat (limited to 'tools/testing/selftests/drivers/net')
-rw-r--r--  tools/testing/selftests/drivers/net/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_options.sh | 54
-rw-r--r--  tools/testing/selftests/drivers/net/hw/.gitignore | 1
-rw-r--r--  tools/testing/selftests/drivers/net/hw/Makefile | 11
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/devmem.py | 45
-rw-r--r--  tools/testing/selftests/drivers/net/hw/lib/py/__init__.py | 1
-rw-r--r--  tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py | 222
-rw-r--r--  tools/testing/selftests/drivers/net/hw/ncdevmem.c | 789
-rw-r--r--  tools/testing/selftests/drivers/net/hw/nic_link_layer.py | 113
-rw-r--r--  tools/testing/selftests/drivers/net/hw/nic_performance.py | 137
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_ctx.py | 107
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/load.py | 20
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh | 2
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh | 12
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh | 85
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh | 167
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh | 118
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh | 138
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | 10
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh | 26
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh | 213
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh | 32
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh | 18
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_basic.sh | 40
-rw-r--r--  tools/testing/selftests/drivers/net/netdevsim/Makefile | 3
-rw-r--r--  tools/testing/selftests/drivers/net/netdevsim/config | 1
-rw-r--r--  tools/testing/selftests/drivers/net/netdevsim/ethtool-features.sh | 31
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh | 6
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh | 117
-rwxr-xr-x  tools/testing/selftests/drivers/net/shaper.py | 461
36 files changed, 2536 insertions(+), 469 deletions(-)
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index 39fb97a8c1df..0fec8f9801ad 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -9,6 +9,7 @@ TEST_PROGS := \
ping.py \
queues.py \
stats.py \
+ shaper.py \
# end of TEST_PROGS
include ../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
index 41d0859feb7d..edc56e2cc606 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
@@ -11,6 +11,8 @@ ALL_TESTS="
lib_dir=$(dirname "$0")
source ${lib_dir}/bond_topo_3d1c.sh
+c_maddr="33:33:00:00:00:10"
+g_maddr="33:33:00:00:02:54"
skip_prio()
{
@@ -240,6 +242,54 @@ arp_validate_test()
done
}
+# Testing correct multicast groups are added to slaves for ns targets
+arp_validate_mcast()
+{
+ RET=0
+ local arp_valid=$(cmd_jq "ip -n ${s_ns} -j -d link show bond0" ".[].linkinfo.info_data.arp_validate")
+ local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+
+ for i in $(seq 0 2); do
+ maddr_list=$(ip -n ${s_ns} maddr show dev eth${i})
+
+ # arp_valid == 0 or active_slave should not join any maddrs
+ if { [ "$arp_valid" == "null" ] || [ "eth${i}" == ${active_slave} ]; } && \
+ echo "$maddr_list" | grep -qE "${c_maddr}|${g_maddr}"; then
+ RET=1
+ check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group"
+ # arp_valid != 0 and backup_slave should join both maddrs
+ elif [ "$arp_valid" != "null" ] && [ "eth${i}" != ${active_slave} ] && \
+ ( ! echo "$maddr_list" | grep -q "${c_maddr}" || \
+ ! echo "$maddr_list" | grep -q "${g_maddr}"); then
+ RET=1
+ check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group"
+ fi
+ done
+
+ # Do failover
+ ip -n ${s_ns} link set ${active_slave} down
+ # wait for active link change
+ slowwait 2 active_slave_changed $active_slave
+ active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+
+ for i in $(seq 0 2); do
+ maddr_list=$(ip -n ${s_ns} maddr show dev eth${i})
+
+ # arp_valid == 0 or active_slave should not join any maddrs
+ if { [ "$arp_valid" == "null" ] || [ "eth${i}" == ${active_slave} ]; } && \
+ echo "$maddr_list" | grep -qE "${c_maddr}|${g_maddr}"; then
+ RET=1
+ check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group"
+ # arp_valid != 0 and backup_slave should join both maddrs
+ elif [ "$arp_valid" != "null" ] && [ "eth${i}" != ${active_slave} ] && \
+ ( ! echo "$maddr_list" | grep -q "${c_maddr}" || \
+ ! echo "$maddr_list" | grep -q "${g_maddr}"); then
+ RET=1
+ check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group"
+ fi
+ done
+}
+
arp_validate_arp()
{
local mode=$1
@@ -261,8 +311,10 @@ arp_validate_ns()
fi
for val in $(seq 0 6); do
- arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} arp_validate $val"
+ arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6},${c_ip6} arp_validate $val"
log_test "arp_validate" "$mode ns_ip6_target arp_validate $val"
+ arp_validate_mcast
+ log_test "arp_validate" "join mcast group"
done
}
diff --git a/tools/testing/selftests/drivers/net/hw/.gitignore b/tools/testing/selftests/drivers/net/hw/.gitignore
new file mode 100644
index 000000000000..e9fe6ede681a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/.gitignore
@@ -0,0 +1 @@
+ncdevmem
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index c9f2f48fc30f..21ba64ce1e34 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -3,6 +3,7 @@
TEST_PROGS = \
csum.py \
devlink_port_split.py \
+ devmem.py \
ethtool.sh \
ethtool_extended_state.sh \
ethtool_mm.sh \
@@ -10,6 +11,8 @@ TEST_PROGS = \
hw_stats_l3.sh \
hw_stats_l3_gre.sh \
loopback.sh \
+ nic_link_layer.py \
+ nic_performance.py \
pp_alloc_fail.py \
rss_ctx.py \
#
@@ -26,4 +29,12 @@ TEST_INCLUDES := \
../../../net/forwarding/tc_common.sh \
#
+# YNL files, must be before "include ..lib.mk"
+YNL_GEN_FILES := ncdevmem
+TEST_GEN_FILES += $(YNL_GEN_FILES)
+
include ../../../lib.mk
+
+# YNL build
+YNL_GENS := ethtool netdev
+include ../../../net/ynl.mk
diff --git a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py
new file mode 100755
index 000000000000..1223f0f5c10c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/devmem.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+from lib.py import ksft_run, ksft_exit
+from lib.py import ksft_eq, KsftSkipEx
+from lib.py import NetDrvEpEnv
+from lib.py import bkg, cmd, rand_port, wait_port_listen
+from lib.py import ksft_disruptive
+
+
+def require_devmem(cfg):
+ if not hasattr(cfg, "_devmem_probed"):
+ port = rand_port()
+ probe_command = f"./ncdevmem -f {cfg.ifname}"
+ cfg._devmem_supported = cmd(probe_command, fail=False, shell=True).ret == 0
+ cfg._devmem_probed = True
+
+ if not cfg._devmem_supported:
+ raise KsftSkipEx("Test requires devmem support")
+
+
+@ksft_disruptive
+def check_rx(cfg) -> None:
+ cfg.require_v6()
+ require_devmem(cfg)
+
+ port = rand_port()
+ listen_cmd = f"./ncdevmem -l -f {cfg.ifname} -s {cfg.v6} -p {port}"
+
+ with bkg(listen_cmd) as socat:
+ wait_port_listen(port)
+ cmd(f"echo -e \"hello\\nworld\"| socat -u - TCP6:[{cfg.v6}]:{port}", host=cfg.remote, shell=True)
+
+ ksft_eq(socat.stdout.strip(), "hello\nworld")
+
+
+def main() -> None:
+ with NetDrvEpEnv(__file__) as cfg:
+ ksft_run([check_rx],
+ args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
index b582885786f5..399789a9676a 100644
--- a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
@@ -9,6 +9,7 @@ try:
sys.path.append(KSFT_DIR.as_posix())
from net.lib.py import *
from drivers.net.lib.py import *
+ from .linkconfig import LinkConfig
except ModuleNotFoundError as e:
ksft_pr("Failed importing `net` library from kernel sources")
ksft_pr(str(e))
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py b/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py
new file mode 100644
index 000000000000..db84000fc75b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/linkconfig.py
@@ -0,0 +1,222 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from lib.py import cmd, ethtool, ip
+from lib.py import ksft_pr, ksft_eq, KsftSkipEx
+from typing import Optional
+import re
+import time
+import json
+
+#The LinkConfig class is implemented to handle the link layer configurations.
+#Required minimum ethtool version is 6.10
+
+class LinkConfig:
+ """Class for handling the link layer configurations"""
+ def __init__(self, cfg: object) -> None:
+ self.cfg = cfg
+ self.partner_netif = self.get_partner_netif_name()
+
+ """Get the initial link configuration of local interface"""
+ self.common_link_modes = self.get_common_link_modes()
+
+ def get_partner_netif_name(self) -> Optional[str]:
+ partner_netif = None
+ try:
+ if not self.verify_link_up():
+ return None
+ """Get partner interface name"""
+ partner_json_output = ip("addr show", json=True, host=self.cfg.remote)
+ for interface in partner_json_output:
+ for addr in interface.get('addr_info', []):
+ if addr.get('local') == self.cfg.remote_addr:
+ partner_netif = interface['ifname']
+ ksft_pr(f"Partner Interface name: {partner_netif}")
+ if partner_netif is None:
+ ksft_pr("Unable to get the partner interface name")
+ except Exception as e:
+ print(f"Unexpected error occurred while getting partner interface name: {e}")
+ self.partner_netif = partner_netif
+ return partner_netif
+
+ def verify_link_up(self) -> bool:
+ """Verify whether the local interface link is up"""
+ with open(f"/sys/class/net/{self.cfg.ifname}/operstate", "r") as fp:
+ link_state = fp.read().strip()
+
+ if link_state == "down":
+ ksft_pr(f"Link state of interface {self.cfg.ifname} is DOWN")
+ return False
+ else:
+ return True
+
+ def reset_interface(self, local: bool = True, remote: bool = True) -> bool:
+ ksft_pr("Resetting interfaces in local and remote")
+ if remote:
+ if self.verify_link_up():
+ if self.partner_netif is not None:
+ ifname = self.partner_netif
+ link_up_cmd = f"ip link set up {ifname}"
+ link_down_cmd = f"ip link set down {ifname}"
+ reset_cmd = f"{link_down_cmd} && sleep 5 && {link_up_cmd}"
+ try:
+ cmd(reset_cmd, host=self.cfg.remote)
+ except Exception as e:
+ ksft_pr(f"Unexpected error occurred while resetting remote: {e}")
+ else:
+ ksft_pr("Partner interface not available")
+ if local:
+ ifname = self.cfg.ifname
+ link_up_cmd = f"ip link set up {ifname}"
+ link_down_cmd = f"ip link set down {ifname}"
+ reset_cmd = f"{link_down_cmd} && sleep 5 && {link_up_cmd}"
+ try:
+ cmd(reset_cmd)
+ except Exception as e:
+ ksft_pr(f"Unexpected error occurred while resetting local: {e}")
+ time.sleep(10)
+ if self.verify_link_up() and self.get_ethtool_field("link-detected"):
+ ksft_pr("Local and remote interfaces reset to original state")
+ return True
+ else:
+ ksft_pr("Error occurred after resetting interfaces. Link is DOWN.")
+ return False
+
+ def set_speed_and_duplex(self, speed: str, duplex: str, autoneg: bool = True) -> bool:
+ """Set the speed and duplex state for the interface"""
+ autoneg_state = "on" if autoneg is True else "off"
+ process = None
+ try:
+ process = ethtool(f"--change {self.cfg.ifname} speed {speed} duplex {duplex} autoneg {autoneg_state}")
+ except Exception as e:
+ ksft_pr(f"Unexpected error occurred while setting speed/duplex: {e}")
+ if process is None or process.ret != 0:
+ return False
+ else:
+ ksft_pr(f"Speed: {speed} Mbps, Duplex: {duplex} set for Interface: {self.cfg.ifname}")
+ return True
+
+ def verify_speed_and_duplex(self, expected_speed: str, expected_duplex: str) -> bool:
+ if not self.verify_link_up():
+ return False
+ """Verifying the speed and duplex state for the interface"""
+ with open(f"/sys/class/net/{self.cfg.ifname}/speed", "r") as fp:
+ actual_speed = fp.read().strip()
+ with open(f"/sys/class/net/{self.cfg.ifname}/duplex", "r") as fp:
+ actual_duplex = fp.read().strip()
+
+ ksft_eq(actual_speed, expected_speed)
+ ksft_eq(actual_duplex, expected_duplex)
+ return True
+
+ def set_autonegotiation_state(self, state: str, remote: bool = False) -> bool:
+ common_link_modes = self.common_link_modes
+ speeds, duplex_modes = self.get_speed_duplex_values(self.common_link_modes)
+ if not speeds or not duplex_modes:
+ ksft_pr("No speed or duplex modes found")
+ return False
+ speed = speeds[0]
+ duplex = duplex_modes[0]
+
+ speed_duplex_cmd = f"speed {speed} duplex {duplex}" if state == "off" else ""
+ if remote:
+ if not self.verify_link_up():
+ return False
+ """Set the autonegotiation state for the partner"""
+ command = f"-s {self.partner_netif} {speed_duplex_cmd} autoneg {state}"
+ partner_autoneg_change = None
+ """Set autonegotiation state for interface in remote pc"""
+ try:
+ partner_autoneg_change = ethtool(command, host=self.cfg.remote)
+ except Exception as e:
+ ksft_pr(f"Unexpected error occurred while changing auto-neg in remote: {e}")
+ if partner_autoneg_change is None or partner_autoneg_change.ret != 0:
+ ksft_pr(f"Not able to set autoneg parameter for interface {self.partner_netif}.")
+ return False
+ ksft_pr(f"Autoneg set as {state} for {self.partner_netif}")
+ else:
+ """Set the autonegotiation state for the interface"""
+ try:
+ process = ethtool(f"-s {self.cfg.ifname} {speed_duplex_cmd} autoneg {state}")
+ if process.ret != 0:
+ ksft_pr(f"Not able to set autoneg parameter for interface {self.cfg.ifname}")
+ return False
+ except Exception as e:
+ ksft_pr(f"Unexpected error occurred while changing auto-neg in local: {e}")
+ return False
+ ksft_pr(f"Autoneg set as {state} for {self.cfg.ifname}")
+ return True
+
+ def check_autoneg_supported(self, remote: bool = False) -> bool:
+ if not remote:
+ local_autoneg = self.get_ethtool_field("supports-auto-negotiation")
+ if local_autoneg is None:
+ ksft_pr(f"Unable to fetch auto-negotiation status for interface {self.cfg.ifname}")
+ """Return autoneg status of the local interface"""
+ return local_autoneg
+ else:
+ if not self.verify_link_up():
+ raise KsftSkipEx("Link is DOWN")
+ """Check remote auto-negotiation support status"""
+ partner_autoneg = False
+ if self.partner_netif is not None:
+ partner_autoneg = self.get_ethtool_field("supports-auto-negotiation", remote=True)
+ if partner_autoneg is None:
+ ksft_pr(f"Unable to fetch auto-negotiation status for interface {self.partner_netif}")
+ return partner_autoneg
+
+ def get_common_link_modes(self) -> set[str]:
+ common_link_modes = []
+ """Populate common link modes"""
+ link_modes = self.get_ethtool_field("supported-link-modes")
+ partner_link_modes = self.get_ethtool_field("link-partner-advertised-link-modes")
+ if link_modes is None:
+ raise KsftSkipEx(f"Link modes not available for {self.cfg.ifname}")
+ if partner_link_modes is None:
+ raise KsftSkipEx(f"Partner link modes not available for {self.cfg.ifname}")
+ common_link_modes = set(link_modes) & set(partner_link_modes)
+ return common_link_modes
+
+ def get_speed_duplex_values(self, link_modes: list[str]) -> tuple[list[str], list[str]]:
+ speed = []
+ duplex = []
+ """Check the link modes"""
+ for data in link_modes:
+ parts = data.split('/')
+ speed_value = re.match(r'\d+', parts[0])
+ if speed_value:
+ speed.append(speed_value.group())
+ else:
+ ksft_pr(f"No speed value found for interface {self.cfg.ifname}")
+ return None, None
+ duplex.append(parts[1].lower())
+ return speed, duplex
+
+ def get_ethtool_field(self, field: str, remote: bool = False) -> Optional[str]:
+ process = None
+ if not remote:
+ """Get the ethtool field value for the local interface"""
+ try:
+ process = ethtool(self.cfg.ifname, json=True)
+ except Exception as e:
+ ksft_pr("Required minimum ethtool version is 6.10")
+ ksft_pr(f"Unexpected error occurred while getting ethtool field in local: {e}")
+ return None
+ else:
+ if not self.verify_link_up():
+ return None
+ """Get the ethtool field value for the remote interface"""
+ self.cfg.require_cmd("ethtool", remote=True)
+ if self.partner_netif is None:
+ ksft_pr(f"Partner interface name is unavailable.")
+ return None
+ try:
+ process = ethtool(self.partner_netif, json=True, host=self.cfg.remote)
+ except Exception as e:
+ ksft_pr("Required minimum ethtool version is 6.10")
+ ksft_pr(f"Unexpected error occurred while getting ethtool field in remote: {e}")
+ return None
+ json_data = process[0]
+ """Check if the field exist in the json data"""
+ if field not in json_data:
+ raise KsftSkipEx(f"Field {field} does not exist in the output of interface {json_data['ifname']}")
+ return json_data[field]
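For reference, get_speed_duplex_values() above splits ethtool link-mode strings such as "1000baseT/Full" on '/' and keeps the leading digits as the speed. A minimal standalone sketch of the same parsing (illustration only, not part of the patch):

    import re

    def parse_link_mode(mode: str) -> tuple[str, str]:
        # "1000baseT/Full" -> ("1000", "full"), mirroring get_speed_duplex_values()
        speed_part, duplex_part = mode.split('/')
        match = re.match(r'\d+', speed_part)
        if match is None:
            raise ValueError(f"no speed found in {mode!r}")
        return match.group(), duplex_part.lower()

    assert parse_link_mode("1000baseT/Full") == ("1000", "full")
    assert parse_link_mode("100baseT/Half") == ("100", "half")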
diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
new file mode 100644
index 000000000000..8e502a1f8f9b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
+ * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
+ *
+ * Usage:
+ *
+ * On server:
+ * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
+ *
+ * On client:
+ * echo -n "hello\nworld" | nc -s <server IP> 5201 -p 5201
+ *
+ * Test data validation:
+ *
+ * On server:
+ * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
+ *
+ * On client:
+ * yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
+ * tr \\n \\0 | \
+ * head -c 5G | \
+ * nc <server IP> 5201 -p 5201
+ *
+ *
+ * Note this is compatible with regular netcat. i.e. the sender or receiver can
+ * be replaced with regular netcat to test the RX or TX path in isolation.
+ * be replaced with regular netcat to test the RX or TX path in isolation.
+ */
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
+#include <linux/uio.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#define __iovec_defined
+#include <fcntl.h>
+#include <malloc.h>
+#include <error.h>
+
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+
+#include <linux/memfd.h>
+#include <linux/dma-buf.h>
+#include <linux/udmabuf.h>
+#include <libmnl/libmnl.h>
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/genetlink.h>
+#include <linux/netdev.h>
+#include <linux/ethtool_netlink.h>
+#include <time.h>
+#include <net/if.h>
+
+#include "netdev-user.h"
+#include "ethtool-user.h"
+#include <ynl.h>
+
+#define PAGE_SHIFT 12
+#define TEST_PREFIX "ncdevmem"
+#define NUM_PAGES 16000
+
+#ifndef MSG_SOCK_DEVMEM
+#define MSG_SOCK_DEVMEM 0x2000000
+#endif
+
+static char *server_ip;
+static char *client_ip;
+static char *port;
+static size_t do_validation;
+static int start_queue = -1;
+static int num_queues = -1;
+static char *ifname;
+static unsigned int ifindex;
+static unsigned int dmabuf_id;
+
+struct memory_buffer {
+ int fd;
+ size_t size;
+
+ int devfd;
+ int memfd;
+ char *buf_mem;
+};
+
+struct memory_provider {
+ struct memory_buffer *(*alloc)(size_t size);
+ void (*free)(struct memory_buffer *ctx);
+ void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
+ size_t off, int n);
+};
+
+static struct memory_buffer *udmabuf_alloc(size_t size)
+{
+ struct udmabuf_create create;
+ struct memory_buffer *ctx;
+ int ret;
+
+ ctx = malloc(sizeof(*ctx));
+ if (!ctx)
+ error(1, ENOMEM, "malloc failed");
+
+ ctx->size = size;
+
+ ctx->devfd = open("/dev/udmabuf", O_RDWR);
+ if (ctx->devfd < 0)
+ error(1, errno,
+ "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+ TEST_PREFIX);
+
+ ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
+ if (ctx->memfd < 0)
+ error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);
+
+ ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ if (ret < 0)
+ error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+
+ ret = ftruncate(ctx->memfd, size);
+ if (ret == -1)
+ error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+
+ memset(&create, 0, sizeof(create));
+
+ create.memfd = ctx->memfd;
+ create.offset = 0;
+ create.size = size;
+ ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
+ if (ctx->fd < 0)
+ error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
+
+ ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ ctx->fd, 0);
+ if (ctx->buf_mem == MAP_FAILED)
+ error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);
+
+ return ctx;
+}
+
+static void udmabuf_free(struct memory_buffer *ctx)
+{
+ munmap(ctx->buf_mem, ctx->size);
+ close(ctx->fd);
+ close(ctx->memfd);
+ close(ctx->devfd);
+ free(ctx);
+}
+
+static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
+ size_t off, int n)
+{
+ struct dma_buf_sync sync = {};
+
+ sync.flags = DMA_BUF_SYNC_START;
+ ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
+
+ memcpy(dst, src->buf_mem + off, n);
+
+ sync.flags = DMA_BUF_SYNC_END;
+ ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
+}
+
+static struct memory_provider udmabuf_memory_provider = {
+ .alloc = udmabuf_alloc,
+ .free = udmabuf_free,
+ .memcpy_from_device = udmabuf_memcpy_from_device,
+};
+
+static struct memory_provider *provider = &udmabuf_memory_provider;
+
+static void print_nonzero_bytes(void *ptr, size_t size)
+{
+ unsigned char *p = ptr;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ putchar(p[i]);
+}
+
+void validate_buffer(void *line, size_t size)
+{
+ static unsigned char seed = 1;
+ unsigned char *ptr = line;
+ int errors = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i] != seed) {
+ fprintf(stderr,
+ "Failed validation: expected=%u, actual=%u, index=%lu\n",
+ seed, ptr[i], i);
+ errors++;
+ if (errors > 20)
+ error(1, 0, "validation failed.");
+ }
+ seed++;
+ if (seed == do_validation)
+ seed = 0;
+ }
+
+ fprintf(stdout, "Validated buffer\n");
+}
+
+static int rxq_num(int ifindex)
+{
+ struct ethtool_channels_get_req *req;
+ struct ethtool_channels_get_rsp *rsp;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int num = -1;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return -1;
+ }
+
+ req = ethtool_channels_get_req_alloc();
+ ethtool_channels_get_req_set_header_dev_index(req, ifindex);
+ rsp = ethtool_channels_get(ys, req);
+ if (rsp)
+ num = rsp->rx_count + rsp->combined_count;
+ ethtool_channels_get_req_free(req);
+ ethtool_channels_get_rsp_free(rsp);
+
+ ynl_sock_destroy(ys);
+
+ return num;
+}
+
+#define run_command(cmd, ...) \
+ ({ \
+ char command[256]; \
+ memset(command, 0, sizeof(command)); \
+ snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \
+ fprintf(stderr, "Running: %s\n", command); \
+ system(command); \
+ })
+
+static int reset_flow_steering(void)
+{
+ /* Depending on the NIC, toggling ntuple off and on might not
+ * be allowed. Additionally, attempting to delete existing filters
+ * will fail if no filters are present. Therefore, do not enforce
+ * the exit status.
+ */
+
+ run_command("sudo ethtool -K %s ntuple off >&2", ifname);
+ run_command("sudo ethtool -K %s ntuple on >&2", ifname);
+ run_command(
+ "sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 ethtool -N %s delete >&2",
+ ifname, ifname);
+ return 0;
+}
+
+static const char *tcp_data_split_str(int val)
+{
+ switch (val) {
+ case 0:
+ return "off";
+ case 1:
+ return "auto";
+ case 2:
+ return "on";
+ default:
+ return "?";
+ }
+}
+
+static int configure_headersplit(bool on)
+{
+ struct ethtool_rings_get_req *get_req;
+ struct ethtool_rings_get_rsp *get_rsp;
+ struct ethtool_rings_set_req *req;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ret;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return -1;
+ }
+
+ req = ethtool_rings_set_req_alloc();
+ ethtool_rings_set_req_set_header_dev_index(req, ifindex);
+ /* 0 - off, 1 - auto, 2 - on */
+ ethtool_rings_set_req_set_tcp_data_split(req, on ? 2 : 0);
+ ret = ethtool_rings_set(ys, req);
+ if (ret < 0)
+ fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
+ ethtool_rings_set_req_free(req);
+
+ if (ret == 0) {
+ get_req = ethtool_rings_get_req_alloc();
+ ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
+ get_rsp = ethtool_rings_get(ys, get_req);
+ ethtool_rings_get_req_free(get_req);
+ if (get_rsp)
+ fprintf(stderr, "TCP header split: %s\n",
+ tcp_data_split_str(get_rsp->tcp_data_split));
+ ethtool_rings_get_rsp_free(get_rsp);
+ }
+
+ ynl_sock_destroy(ys);
+
+ return ret;
+}
+
+static int configure_rss(void)
+{
+ return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
+}
+
+static int configure_channels(unsigned int rx, unsigned int tx)
+{
+ return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
+}
+
+static int configure_flow_steering(struct sockaddr_in6 *server_sin)
+{
+ const char *type = "tcp6";
+ const char *server_addr;
+ char buf[40];
+
+ inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
+ server_addr = buf;
+
+ if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
+ type = "tcp4";
+ server_addr = strrchr(server_addr, ':') + 1;
+ }
+
+ return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
+ ifname,
+ type,
+ client_ip ? "src-ip" : "",
+ client_ip ?: "",
+ server_addr,
+ client_ip ? "src-port" : "",
+ client_ip ? port : "",
+ port, start_queue);
+}
+
+static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
+ struct netdev_queue_id *queues,
+ unsigned int n_queue_index, struct ynl_sock **ys)
+{
+ struct netdev_bind_rx_req *req = NULL;
+ struct netdev_bind_rx_rsp *rsp = NULL;
+ struct ynl_error yerr;
+
+ *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!*ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return -1;
+ }
+
+ req = netdev_bind_rx_req_alloc();
+ netdev_bind_rx_req_set_ifindex(req, ifindex);
+ netdev_bind_rx_req_set_fd(req, dmabuf_fd);
+ __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
+
+ rsp = netdev_bind_rx(*ys, req);
+ if (!rsp) {
+ perror("netdev_bind_rx");
+ goto err_close;
+ }
+
+ if (!rsp->_present.id) {
+ perror("id not present");
+ goto err_close;
+ }
+
+ fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
+ dmabuf_id = rsp->id;
+
+ netdev_bind_rx_req_free(req);
+ netdev_bind_rx_rsp_free(rsp);
+
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
+ netdev_bind_rx_req_free(req);
+ ynl_sock_destroy(*ys);
+ return -1;
+}
+
+static void enable_reuseaddr(int fd)
+{
+ int opt = 1;
+ int ret;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
+ if (ret)
+ error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
+ if (ret)
+ error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
+}
+
+static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
+{
+ int ret;
+
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = htons(port);
+
+ ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
+ if (ret != 1) {
+ /* fallback to plain IPv4 */
+ ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
+ if (ret != 1)
+ return -1;
+
+ /* add ::ffff prefix */
+ sin6->sin6_addr.s6_addr32[0] = 0;
+ sin6->sin6_addr.s6_addr32[1] = 0;
+ sin6->sin6_addr.s6_addr16[4] = 0;
+ sin6->sin6_addr.s6_addr16[5] = 0xffff;
+ }
+
+ return 0;
+}
+
+int do_server(struct memory_buffer *mem)
+{
+ char ctrl_data[sizeof(int) * 20000];
+ struct netdev_queue_id *queues;
+ size_t non_page_aligned_frags = 0;
+ struct sockaddr_in6 client_addr;
+ struct sockaddr_in6 server_sin;
+ size_t page_aligned_frags = 0;
+ size_t total_received = 0;
+ socklen_t client_addr_len;
+ bool is_devmem = false;
+ char *tmp_mem = NULL;
+ struct ynl_sock *ys;
+ char iobuf[819200];
+ char buffer[256];
+ int socket_fd;
+ int client_fd;
+ size_t i = 0;
+ int ret;
+
+ ret = parse_address(server_ip, atoi(port), &server_sin);
+ if (ret < 0)
+ error(1, 0, "parse server address");
+
+ if (reset_flow_steering())
+ error(1, 0, "Failed to reset flow steering\n");
+
+ if (configure_headersplit(1))
+ error(1, 0, "Failed to enable TCP header split\n");
+
+ /* Configure RSS to divert all traffic from our devmem queues */
+ if (configure_rss())
+ error(1, 0, "Failed to configure rss\n");
+
+ /* Flow steer our devmem flows to start_queue */
+ if (configure_flow_steering(&server_sin))
+ error(1, 0, "Failed to configure flow steering\n");
+
+ sleep(1);
+
+ queues = malloc(sizeof(*queues) * num_queues);
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+ error(1, 0, "Failed to bind\n");
+
+ tmp_mem = malloc(mem->size);
+ if (!tmp_mem)
+ error(1, ENOMEM, "malloc failed");
+
+ socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (socket_fd < 0)
+ error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+
+ enable_reuseaddr(socket_fd);
+
+ fprintf(stderr, "binding to address %s:%d\n", server_ip,
+ ntohs(server_sin.sin6_port));
+
+ ret = bind(socket_fd, &server_sin, sizeof(server_sin));
+ if (ret)
+ error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
+
+ ret = listen(socket_fd, 1);
+ if (ret)
+ error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
+
+ client_addr_len = sizeof(client_addr);
+
+ inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
+ sizeof(buffer));
+ fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
+ ntohs(server_sin.sin6_port));
+ client_fd = accept(socket_fd, &client_addr, &client_addr_len);
+
+ inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
+ sizeof(buffer));
+ fprintf(stderr, "Got connection from %s:%d\n", buffer,
+ ntohs(client_addr.sin6_port));
+
+ while (1) {
+ struct iovec iov = { .iov_base = iobuf,
+ .iov_len = sizeof(iobuf) };
+ struct dmabuf_cmsg *dmabuf_cmsg = NULL;
+ struct cmsghdr *cm = NULL;
+ struct msghdr msg = { 0 };
+ struct dmabuf_token token;
+ ssize_t ret;
+
+ is_devmem = false;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = ctrl_data;
+ msg.msg_controllen = sizeof(ctrl_data);
+ ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
+ fprintf(stderr, "recvmsg ret=%ld\n", ret);
+ if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
+ continue;
+ if (ret < 0) {
+ perror("recvmsg");
+ continue;
+ }
+ if (ret == 0) {
+ fprintf(stderr, "client exited\n");
+ goto cleanup;
+ }
+
+ i++;
+ for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ if (cm->cmsg_level != SOL_SOCKET ||
+ (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
+ cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
+ fprintf(stderr, "skipping non-devmem cmsg\n");
+ continue;
+ }
+
+ dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
+ is_devmem = true;
+
+ if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
+ /* TODO: process data copied from skb's linear
+ * buffer.
+ */
+ fprintf(stderr,
+ "SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
+ dmabuf_cmsg->frag_size);
+
+ continue;
+ }
+
+ token.token_start = dmabuf_cmsg->frag_token;
+ token.token_count = 1;
+
+ total_received += dmabuf_cmsg->frag_size;
+ fprintf(stderr,
+ "received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
+ dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
+ dmabuf_cmsg->frag_offset % getpagesize(),
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
+ total_received, dmabuf_cmsg->dmabuf_id);
+
+ if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
+ error(1, 0,
+ "received on wrong dmabuf_id: flow steering error\n");
+
+ if (dmabuf_cmsg->frag_size % getpagesize())
+ non_page_aligned_frags++;
+ else
+ page_aligned_frags++;
+
+ provider->memcpy_from_device(tmp_mem, mem,
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size);
+
+ if (do_validation)
+ validate_buffer(tmp_mem,
+ dmabuf_cmsg->frag_size);
+ else
+ print_nonzero_bytes(tmp_mem,
+ dmabuf_cmsg->frag_size);
+
+ ret = setsockopt(client_fd, SOL_SOCKET,
+ SO_DEVMEM_DONTNEED, &token,
+ sizeof(token));
+ if (ret != 1)
+ error(1, 0,
+ "SO_DEVMEM_DONTNEED not enough tokens");
+ }
+ if (!is_devmem)
+ error(1, 0, "flow steering error\n");
+
+ fprintf(stderr, "total_received=%lu\n", total_received);
+ }
+
+ fprintf(stderr, "%s: ok\n", TEST_PREFIX);
+
+ fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ page_aligned_frags, non_page_aligned_frags);
+
+
+cleanup:
+
+ free(tmp_mem);
+ close(client_fd);
+ close(socket_fd);
+ ynl_sock_destroy(ys);
+
+ return 0;
+}
+
+void run_devmem_tests(void)
+{
+ struct netdev_queue_id *queues;
+ struct memory_buffer *mem;
+ struct ynl_sock *ys;
+ size_t i = 0;
+
+ mem = provider->alloc(getpagesize() * NUM_PAGES);
+
+ /* Configure RSS to divert all traffic from our devmem queues */
+ if (configure_rss())
+ error(1, 0, "rss error\n");
+
+ queues = calloc(num_queues, sizeof(*queues));
+
+ if (configure_headersplit(1))
+ error(1, 0, "Failed to configure header split\n");
+
+ if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+ error(1, 0, "Binding empty queues array should have failed\n");
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (configure_headersplit(0))
+ error(1, 0, "Failed to configure header split\n");
+
+ if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+ error(1, 0, "Configure dmabuf with header split off should have failed\n");
+
+ if (configure_headersplit(1))
+ error(1, 0, "Failed to configure header split\n");
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+ error(1, 0, "Failed to bind\n");
+
+ /* Deactivating a bound queue should not be legal */
+ if (!configure_channels(num_queues, num_queues - 1))
+ error(1, 0, "Deactivating a bound queue should be illegal.\n");
+
+ /* Closing the netlink socket does an implicit unbind */
+ ynl_sock_destroy(ys);
+
+ provider->free(mem);
+}
+
+int main(int argc, char *argv[])
+{
+ struct memory_buffer *mem;
+ int is_server = 0, opt;
+ int ret;
+
+ while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
+ switch (opt) {
+ case 'l':
+ is_server = 1;
+ break;
+ case 's':
+ server_ip = optarg;
+ break;
+ case 'c':
+ client_ip = optarg;
+ break;
+ case 'p':
+ port = optarg;
+ break;
+ case 'v':
+ do_validation = atoll(optarg);
+ break;
+ case 'q':
+ num_queues = atoi(optarg);
+ break;
+ case 't':
+ start_queue = atoi(optarg);
+ break;
+ case 'f':
+ ifname = optarg;
+ break;
+ case '?':
+ fprintf(stderr, "unknown option: %c\n", optopt);
+ break;
+ }
+ }
+
+ if (!ifname)
+ error(1, 0, "Missing -f argument\n");
+
+ ifindex = if_nametoindex(ifname);
+
+ if (!server_ip && !client_ip) {
+ if (start_queue < 0 && num_queues < 0) {
+ num_queues = rxq_num(ifindex);
+ if (num_queues < 0)
+ error(1, 0, "couldn't detect number of queues\n");
+ if (num_queues < 2)
+ error(1, 0,
+ "number of device queues is too low\n");
+ /* make sure can bind to multiple queues */
+ start_queue = num_queues / 2;
+ num_queues /= 2;
+ }
+
+ if (start_queue < 0 || num_queues < 0)
+ error(1, 0, "Both -t and -q are required\n");
+
+ run_devmem_tests();
+ return 0;
+ }
+
+ if (start_queue < 0 && num_queues < 0) {
+ num_queues = rxq_num(ifindex);
+ if (num_queues < 2)
+ error(1, 0, "number of device queues is too low\n");
+
+ num_queues = 1;
+ start_queue = rxq_num(ifindex) - num_queues;
+
+ if (start_queue < 0)
+ error(1, 0, "couldn't detect number of queues\n");
+
+ fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
+ }
+
+ for (; optind < argc; optind++)
+ fprintf(stderr, "extra arguments: %s\n", argv[optind]);
+
+ if (start_queue < 0)
+ error(1, 0, "Missing -t argument\n");
+
+ if (num_queues < 0)
+ error(1, 0, "Missing -q argument\n");
+
+ if (!server_ip)
+ error(1, 0, "Missing -s argument\n");
+
+ if (!port)
+ error(1, 0, "Missing -p argument\n");
+
+ mem = provider->alloc(getpagesize() * NUM_PAGES);
+ ret = is_server ? do_server(mem) : 1;
+ provider->free(mem);
+
+ return ret;
+}
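parse_address() above accepts either an IPv6 address or a plain IPv4 address, and hand-builds the ::ffff:a.b.c.d (IPv4-mapped) form for the latter so a single AF_INET6 socket can serve both. A small Python sketch of the same mapping using the standard ipaddress module (illustration only, not part of the patch):

    import ipaddress

    def to_ipv6(addr: str) -> ipaddress.IPv6Address:
        ip = ipaddress.ip_address(addr)
        if isinstance(ip, ipaddress.IPv4Address):
            # same ::ffff:a.b.c.d layout that parse_address() fills in by hand
            return ipaddress.IPv6Address(f"::ffff:{ip}")
        return ip

    print(to_ipv6("192.0.2.1"))    # IPv4-mapped IPv6 address
    print(to_ipv6("2001:db8::1"))  # passed through unchanged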
diff --git a/tools/testing/selftests/drivers/net/hw/nic_link_layer.py b/tools/testing/selftests/drivers/net/hw/nic_link_layer.py
new file mode 100644
index 000000000000..efd921180532
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nic_link_layer.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+#Introduction:
+#This file has basic link layer tests for generic NIC drivers.
+#The test comprises auto-negotiation, speed and duplex checks.
+#
+#Setup:
+#Connect the DUT PC (with the NIC under test) back-to-back to the partner PC over an Ethernet medium of your choice (RJ45, T1).
+#
+# DUT PC Partner PC
+#┌───────────────────────┐ ┌──────────────────────────┐
+#│ │ │ │
+#│ │ │ │
+#│ ┌───────────┐ │ │
+#│ │DUT NIC │ Eth │ │
+#│ │Interface ─┼─────────────────────────┼─ any eth Interface │
+#│ └───────────┘ │ │
+#│ │ │ │
+#│ │ │ │
+#└───────────────────────┘ └──────────────────────────┘
+#
+#Configurations:
+#Required minimum ethtool version is 6.10 (supports json)
+#Default values:
+#time_delay = 8 #time taken to wait for transitions to happen, in seconds.
+
+import time
+import argparse
+from lib.py import ksft_run, ksft_exit, ksft_pr, ksft_eq
+from lib.py import KsftFailEx, KsftSkipEx
+from lib.py import NetDrvEpEnv
+from lib.py import LinkConfig
+
+def _pre_test_checks(cfg: object, link_config: LinkConfig) -> None:
+ if link_config.partner_netif is None:
+ raise KsftSkipEx("Partner interface is not available")
+ if not link_config.check_autoneg_supported() or not link_config.check_autoneg_supported(remote=True):
+ raise KsftSkipEx(f"Auto-negotiation not supported for interface {cfg.ifname} or {link_config.partner_netif}")
+ if not link_config.verify_link_up():
+ raise KsftSkipEx(f"Link state of interface {cfg.ifname} is DOWN")
+
+def verify_autonegotiation(cfg: object, expected_state: str, link_config: LinkConfig) -> None:
+ if not link_config.verify_link_up():
+ raise KsftSkipEx(f"Link state of interface {cfg.ifname} is DOWN")
+ """Verifying the autonegotiation state in partner"""
+ partner_autoneg_output = link_config.get_ethtool_field("auto-negotiation", remote=True)
+ if partner_autoneg_output is None:
+ raise KsftSkipEx(f"Auto-negotiation state not available for interface {link_config.partner_netif}")
+ partner_autoneg_state = "on" if partner_autoneg_output is True else "off"
+
+ ksft_eq(partner_autoneg_state, expected_state)
+
+ """Verifying the autonegotiation state of local"""
+ autoneg_output = link_config.get_ethtool_field("auto-negotiation")
+ if autoneg_output is None:
+ raise KsftSkipEx(f"Auto-negotiation state not available for interface {cfg.ifname}")
+ actual_state = "on" if autoneg_output is True else "off"
+
+ ksft_eq(actual_state, expected_state)
+
+ """Verifying the link establishment"""
+ link_available = link_config.get_ethtool_field("link-detected")
+ if link_available is None:
+ raise KsftSkipEx(f"Link status not available for interface {cfg.ifname}")
+ if link_available is not True:
+ raise KsftSkipEx(f"Link not established at interface {cfg.ifname} after changing auto-negotiation")
+
+def test_autonegotiation(cfg: object, link_config: LinkConfig, time_delay: int) -> None:
+ _pre_test_checks(cfg, link_config)
+ for state in ["off", "on"]:
+ if not link_config.set_autonegotiation_state(state, remote=True):
+ raise KsftSkipEx(f"Unable to set auto-negotiation state for interface {link_config.partner_netif}")
+ if not link_config.set_autonegotiation_state(state):
+ raise KsftSkipEx(f"Unable to set auto-negotiation state for interface {cfg.ifname}")
+ time.sleep(time_delay)
+ verify_autonegotiation(cfg, state, link_config)
+
+def test_network_speed(cfg: object, link_config: LinkConfig, time_delay: int) -> None:
+ _pre_test_checks(cfg, link_config)
+ common_link_modes = link_config.common_link_modes
+ if not common_link_modes:
+ raise KsftSkipEx("No common link modes exist")
+ speeds, duplex_modes = link_config.get_speed_duplex_values(common_link_modes)
+
+ if speeds and duplex_modes and len(speeds) == len(duplex_modes):
+ for idx in range(len(speeds)):
+ speed = speeds[idx]
+ duplex = duplex_modes[idx]
+ if not link_config.set_speed_and_duplex(speed, duplex):
+ raise KsftFailEx(f"Unable to set speed and duplex parameters for {cfg.ifname}")
+ time.sleep(time_delay)
+ if not link_config.verify_speed_and_duplex(speed, duplex):
+ raise KsftSkipEx(f"Error occurred while verifying speed and duplex states for interface {cfg.ifname}")
+ else:
+ if not speeds or not duplex_modes:
+ raise KsftSkipEx(f"No supported speeds or duplex modes found for interface {cfg.ifname}")
+ else:
+ raise KsftSkipEx("Mismatch in the number of speeds and duplex modes")
+
+def main() -> None:
+ parser = argparse.ArgumentParser(description="Run basic link layer tests for NIC driver")
+ parser.add_argument('--time-delay', type=int, default=8, help='Time taken to wait for transitions to happen(in seconds). Default is 8 seconds.')
+ args = parser.parse_args()
+ time_delay = args.time_delay
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ link_config = LinkConfig(cfg)
+ ksft_run(globs=globals(), case_pfx={"test_"}, args=(cfg, link_config, time_delay,))
+ link_config.reset_interface()
+ ksft_exit()
+
+if __name__ == "__main__":
+ main()
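verify_autonegotiation() above reads the boolean "auto-negotiation" field that ethtool >= 6.10 reports in its JSON output (via the LinkConfig helper) and compares it with the expected "on"/"off" string. A minimal sketch of that mapping over a parsed ethtool JSON document (the one-element-list shape is assumed from get_ethtool_field(); illustration only, not part of the patch):

    import json

    # shape assumed from get_ethtool_field(): a one-element list of objects keyed by field name
    sample = json.loads('[{"ifname": "eth0", "auto-negotiation": true, "link-detected": true}]')

    def autoneg_state(ethtool_json: list) -> str:
        return "on" if ethtool_json[0]["auto-negotiation"] is True else "off"

    assert autoneg_state(sample) == "on"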
diff --git a/tools/testing/selftests/drivers/net/hw/nic_performance.py b/tools/testing/selftests/drivers/net/hw/nic_performance.py
new file mode 100644
index 000000000000..201403b76ea3
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nic_performance.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+#Introduction:
+#This file has basic performance test for generic NIC drivers.
+#The test comprises throughput checks for TCP and UDP streams.
+#
+#Setup:
+#Connect the DUT PC (with the NIC under test) back-to-back to the partner PC over an Ethernet medium of your choice (RJ45, T1).
+#
+# DUT PC Partner PC
+#┌───────────────────────┐ ┌──────────────────────────┐
+#│ │ │ │
+#│ │ │ │
+#│ ┌───────────┐ │ │
+#│ │DUT NIC │ Eth │ │
+#│ │Interface ─┼─────────────────────────┼─ any eth Interface │
+#│ └───────────┘ │ │
+#│ │ │ │
+#│ │ │ │
+#└───────────────────────┘ └──────────────────────────┘
+#
+#Configurations:
+#To avoid interruptions, add ethtool and ip to the sudoers list on the remote PC and set up SSH key-based login to the remote.
+#Required minimum ethtool version is 6.10
+#Change the below configuration based on your hw needs.
+# """Default values"""
+#time_delay = 8 #time taken to wait for transitions to happen, in seconds.
+#test_duration = 10 #performance test duration for the throughput check, in seconds.
+#send_throughput_threshold = 80 #percentage of send throughput required to pass the check
+#receive_throughput_threshold = 50 #percentage of receive throughput required to pass the check
+
+import time
+import json
+import argparse
+from lib.py import ksft_run, ksft_exit, ksft_pr, ksft_true
+from lib.py import KsftFailEx, KsftSkipEx, GenerateTraffic
+from lib.py import NetDrvEpEnv, bkg, wait_port_listen
+from lib.py import cmd
+from lib.py import LinkConfig
+
+class TestConfig:
+ def __init__(self, time_delay: int, test_duration: int, send_throughput_threshold: int, receive_throughput_threshold: int) -> None:
+ self.time_delay = time_delay
+ self.test_duration = test_duration
+ self.send_throughput_threshold = send_throughput_threshold
+ self.receive_throughput_threshold = receive_throughput_threshold
+
+def _pre_test_checks(cfg: object, link_config: LinkConfig) -> None:
+ if not link_config.verify_link_up():
+ raise KsftSkipEx(f"Link state of interface {cfg.ifname} is DOWN")
+ common_link_modes = link_config.common_link_modes
+ if not common_link_modes:
+ raise KsftSkipEx("No common link modes found")
+ if link_config.partner_netif is None:
+ raise KsftSkipEx("Partner interface is not available")
+ if not link_config.check_autoneg_supported():
+ raise KsftSkipEx("Auto-negotiation not supported by local")
+ if not link_config.check_autoneg_supported(remote=True):
+ raise KsftSkipEx("Auto-negotiation not supported by remote")
+ cfg.require_cmd("iperf3", remote=True)
+
+def check_throughput(cfg: object, link_config: LinkConfig, test_config: TestConfig, protocol: str, traffic: GenerateTraffic) -> None:
+ common_link_modes = link_config.common_link_modes
+ speeds, duplex_modes = link_config.get_speed_duplex_values(common_link_modes)
+ """Test duration in seconds"""
+ duration = test_config.test_duration
+
+ ksft_pr(f"{protocol} test")
+ test_type = "-u" if protocol == "UDP" else ""
+
+ send_throughput = []
+ receive_throughput = []
+ for idx in range(0, len(speeds)):
+ if link_config.set_speed_and_duplex(speeds[idx], duplex_modes[idx]) == False:
+ raise KsftFailEx(f"Not able to set speed and duplex parameters for {cfg.ifname}")
+ time.sleep(test_config.time_delay)
+ if not link_config.verify_link_up():
+ raise KsftSkipEx(f"Link state of interface {cfg.ifname} is DOWN")
+
+ send_command=f"{test_type} -b 0 -t {duration} --json"
+ receive_command=f"{test_type} -b 0 -t {duration} --reverse --json"
+
+ send_result = traffic.run_remote_test(cfg, command=send_command)
+ if send_result.ret != 0:
+ raise KsftSkipEx(f"Error occurred during data transmit: {send_result.stdout}")
+
+ send_output = send_result.stdout
+ send_data = json.loads(send_output)
+
+ """Convert throughput to Mbps"""
+ send_throughput.append(round(send_data['end']['sum_sent']['bits_per_second'] / 1e6, 2))
+ ksft_pr(f"{protocol}: Send throughput: {send_throughput[idx]} Mbps")
+
+ receive_result = traffic.run_remote_test(cfg, command=receive_command)
+ if receive_result.ret != 0:
+ raise KsftSkipEx(f"Error occurred during data receive: {receive_result.stdout}")
+
+ receive_output = receive_result.stdout
+ receive_data = json.loads(receive_output)
+
+ """Convert throughput to Mbps"""
+ receive_throughput.append(round(receive_data['end']['sum_received']['bits_per_second'] / 1e6, 2))
+ ksft_pr(f"{protocol}: Receive throughput: {receive_throughput[idx]} Mbps")
+
+ """Check whether throughput is not below the threshold (default values set at start)"""
+ for idx in range(0, len(speeds)):
+ send_threshold = float(speeds[idx]) * float(test_config.send_throughput_threshold / 100)
+ receive_threshold = float(speeds[idx]) * float(test_config.receive_throughput_threshold / 100)
+ ksft_true(send_throughput[idx] >= send_threshold, f"{protocol}: Send throughput is below threshold for {speeds[idx]} Mbps in {duplex_modes[idx]} duplex")
+ ksft_true(receive_throughput[idx] >= receive_threshold, f"{protocol}: Receive throughput is below threshold for {speeds[idx]} Mbps in {duplex_modes[idx]} duplex")
+
+def test_tcp_throughput(cfg: object, link_config: LinkConfig, test_config: TestConfig, traffic: GenerateTraffic) -> None:
+ _pre_test_checks(cfg, link_config)
+ check_throughput(cfg, link_config, test_config, 'TCP', traffic)
+
+def test_udp_throughput(cfg: object, link_config: LinkConfig, test_config: TestConfig, traffic: GenerateTraffic) -> None:
+ _pre_test_checks(cfg, link_config)
+ check_throughput(cfg, link_config, test_config, 'UDP', traffic)
+
+def main() -> None:
+ parser = argparse.ArgumentParser(description="Run basic performance test for NIC driver")
+ parser.add_argument('--time-delay', type=int, default=8, help='Time taken to wait for transitions to happen(in seconds). Default is 8 seconds.')
+ parser.add_argument('--test-duration', type=int, default=10, help='Performance test duration for the throughput check, in seconds. Default is 10 seconds.')
+ parser.add_argument('--stt', type=int, default=80, help='Send throughput Threshold: Percentage of send throughput upon actual throughput required to pass the throughput check (in percentage). Default is 80.')
+ parser.add_argument('--rtt', type=int, default=50, help='Receive throughput Threshold: Percentage of receive throughput upon actual throughput required to pass the throughput check (in percentage). Default is 50.')
+ args=parser.parse_args()
+ test_config = TestConfig(args.time_delay, args.test_duration, args.stt, args.rtt)
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ traffic = GenerateTraffic(cfg)
+ link_config = LinkConfig(cfg)
+ ksft_run(globs=globals(), case_pfx={"test_"}, args=(cfg, link_config, test_config, traffic, ))
+ link_config.reset_interface()
+ ksft_exit()
+
+if __name__ == "__main__":
+ main()
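The pass criterion in check_throughput() above is a percentage of the negotiated link speed: with the default thresholds, a 1000 Mbps link must reach at least 800 Mbps on send and 500 Mbps on receive. A short sketch of that arithmetic (illustration only, not part of the patch):

    def pass_threshold_mbps(link_speed_mbps: float, threshold_pct: int) -> float:
        # same formula as check_throughput(): speed * threshold / 100
        return float(link_speed_mbps) * threshold_pct / 100

    assert pass_threshold_mbps(1000, 80) == 800.0   # default --stt
    assert pass_threshold_mbps(1000, 50) == 500.0   # default --rtt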
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
index 9d7adb3cf33b..0b49ce7ae678 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -6,7 +6,7 @@ import random
from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt
from lib.py import NetDrvEpEnv
from lib.py import EthtoolFamily, NetdevFamily
-from lib.py import KsftSkipEx
+from lib.py import KsftSkipEx, KsftFailEx
from lib.py import rand_port
from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
@@ -215,7 +215,7 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
defer(ethtool, f"-X {cfg.ifname} default")
else:
other_key = 'noise'
- flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}"
ntuple = ethtool_create(cfg, "-N", flow)
defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
@@ -238,6 +238,32 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
else:
raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")
+ if not main_ctx:
+ ethtool(f"-L {cfg.ifname} combined 4")
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id} action 1"
+ try:
+ # this targets queue 4, which doesn't exist
+ ntuple2 = ethtool_create(cfg, "-N", flow)
+ except CmdExitFailure:
+ pass
+ else:
+ raise Exception(f"Driver didn't prevent us from targeting a nonexistent queue (context {ctx_id})")
+ # change the table to target queues 0 and 2
+ ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 1 0")
+ # ntuple rule therefore targets queues 1 and 3
+ ntuple2 = ethtool_create(cfg, "-N", flow)
+ # should replace existing filter
+ ksft_eq(ntuple, ntuple2)
+ _send_traffic_check(cfg, port, ctx_ref, { 'target': (1, 3),
+ 'noise' : (0, 2) })
+ # Setting queue count to 3 should fail, queue 3 is used
+ try:
+ ethtool(f"-L {cfg.ifname} combined 3")
+ except CmdExitFailure:
+ pass
+ else:
+ raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")
+
def test_rss_resize(cfg):
"""Test resizing of the RSS table.
@@ -429,7 +455,7 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1, "Unexpected context cfg: " + str(data))
ports.append(rand_port())
- flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {ports[i]} context {ctx_id}"
ntuple = ethtool_create(cfg, "-N", flow)
defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
@@ -516,7 +542,7 @@ def test_rss_context_out_of_order(cfg, ctx_cnt=4):
ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete"))
ports.append(rand_port())
- flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {ports[i]} context {ctx_id}"
ntuple_id = ethtool_create(cfg, "-N", flow)
ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}"))
@@ -569,7 +595,7 @@ def test_rss_context_overlap(cfg, other_ctx=0):
port = rand_port()
if other_ctx:
- flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}"
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {other_ctx}"
ntuple_id = ethtool_create(cfg, "-N", flow)
ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
@@ -587,7 +613,7 @@ def test_rss_context_overlap(cfg, other_ctx=0):
# Now create a rule for context 1 and make sure traffic goes to a subset
if other_ctx:
ntuple.exec()
- flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}"
ntuple_id = ethtool_create(cfg, "-N", flow)
defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
@@ -606,6 +632,72 @@ def test_rss_context_overlap2(cfg):
test_rss_context_overlap(cfg, True)
+def test_delete_rss_context_busy(cfg):
+ """
+ Test that deletion returns -EBUSY when an rss context is being used
+ by an ntuple filter.
+ """
+
+ require_ntuple(cfg)
+
+ # create additional rss context
+ ctx_id = ethtool_create(cfg, "-X", "context new")
+ ctx_deleter = defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ # utilize context from ntuple filter
+ port = rand_port()
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}"
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ # attempt to delete in-use context
+ try:
+ ctx_deleter.exec_only()
+ ctx_deleter.cancel()
+ raise KsftFailEx(f"deleted context {ctx_id} used by rule {ntuple_id}")
+ except CmdExitFailure:
+ pass
+
+
+def test_rss_ntuple_addition(cfg):
+ """
+ Test that the queue offset (ring_cookie) of an ntuple rule is added
+ to the queue number read from the indirection table.
+ """
+
+ require_ntuple(cfg)
+
+ queue_cnt = len(_get_rx_cnts(cfg))
+ if queue_cnt < 4:
+ try:
+ ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ # Use queue 0 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 1")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ # create additional rss context
+ ctx_id = ethtool_create(cfg, "-X", "context new equal 2")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ # utilize context from ntuple filter
+ port = rand_port()
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id} action 2"
+ try:
+ ntuple_id = ethtool_create(cfg, "-N", flow)
+ except CmdExitFailure:
+ raise KsftSkipEx("Ntuple filter with RSS and nonzero action not supported")
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+ _send_traffic_check(cfg, port, f"context {ctx_id}", { 'target': (2, 3),
+ 'empty' : (1,),
+ 'noise' : (0,) })
+
+
def main() -> None:
with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
cfg.ethnl = EthtoolFamily()
@@ -616,7 +708,8 @@ def main() -> None:
test_rss_context, test_rss_context4, test_rss_context32,
test_rss_context_dump, test_rss_context_queue_reconfigure,
test_rss_context_overlap, test_rss_context_overlap2,
- test_rss_context_out_of_order, test_rss_context4_create_with_cfg],
+ test_rss_context_out_of_order, test_rss_context4_create_with_cfg,
+ test_delete_rss_context_busy, test_rss_ntuple_addition],
args=(cfg, ))
ksft_exit()
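test_rss_ntuple_addition() above relies on the ntuple rule's queue offset ("action 2", i.e. the ring_cookie) being added to the queue chosen from the RSS context's indirection table, so a context spread over queues 0-1 is expected to deliver to queues 2-3. A toy model of that expectation (illustration only, not the driver implementation):

    def final_rx_queue(indir_queue: int, ring_cookie: int) -> int:
        # queue from the context's indirection table + ntuple action offset
        return indir_queue + ring_cookie

    # context created with "equal 2" spans queues 0 and 1; the rule uses "action 2"
    assert {final_rx_queue(q, 2) for q in (0, 1)} == {2, 3}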
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index d9c10613ae67..da5af2c680fa 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -2,7 +2,7 @@
import time
-from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
+from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen, bkg
class GenerateTraffic:
def __init__(self, env, port=None):
@@ -23,6 +23,24 @@ class GenerateTraffic:
self.stop(verbose=True)
raise Exception("iperf3 traffic did not ramp up")
+ def run_remote_test(self, env: object, port=None, command=None):
+ if port is None:
+ port = rand_port()
+ try:
+ server_cmd = f"iperf3 -s 1 -p {port} --one-off"
+ with bkg(server_cmd, host=env.remote):
#iperf3 runs a TCP server by default;
#-u must be passed in the client command for UDP
+ wait_port_listen(port, host=env.remote)
+ except Exception as e:
+ raise Exception(f"Unexpected error occurred while running server command: {e}")
+ try:
+ client_cmd = f"iperf3 -c {env.remote_addr} -p {port} {command}"
+ proc = cmd(client_cmd)
+ return proc
+ except Exception as e:
+ raise Exception(f"Unexpected error occurred while running client command: {e}")
+
def _wait_pkts(self, pkt_cnt=None, pps=None):
"""
Wait until we've seen pkt_cnt or until traffic ramps up to pps.
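
run_remote_test() wraps a one-shot iperf3 exchange between the remote and local endpoints of the environment. Stripped of the helper plumbing, the commands it builds look roughly like this (port and address are placeholders):

  # Remote side: one-off iperf3 server; TCP unless the client asks for UDP.
  iperf3 -s -p 5201 --one-off &
  # Local side: extra client options arrive via the 'command' argument,
  # e.g. -u for UDP or -t/-b to shape the run.
  iperf3 -c 192.0.2.2 -p 5201 -t 10

The cmd() object returned for the client run carries its output, so callers can parse the iperf3 report afterwards.
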
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh
index 89b55e946eed..36055279ba92 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh
@@ -116,7 +116,7 @@ dev_del_test()
log_test "Device delete"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
}
trap cleanup EXIT
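
The recurring "kill $mz_pid && wait $mz_pid &> /dev/null" lines in this and the following scripts are folded into a shared kill_process helper. Its definition lives in the common net selftest library and is not part of this diff; a plausible minimal shape is:

  kill_process()
  {
          local pid=$1; shift

          # Silence both the kill error (the process or %% job may have
          # exited already) and the job-control notice printed by wait.
          { kill $pid && wait $pid; } 2>/dev/null
  }

Note that it is also used with job specs such as %% in the later tc_sample.sh and fib_notifications.sh hunks.
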
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
index 160891dcb4bc..db5806d189bb 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -595,7 +595,7 @@ irif_disabled_test()
log_test "Ingress RIF disabled"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
ip link set dev $rp1 nomaster
__addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
ip link del dev br0 type bridge
@@ -645,7 +645,7 @@ erif_disabled_test()
log_test "Egress RIF disabled"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
__addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
ip link del dev br0 type bridge
devlink_trap_action_set $trap_name "drop"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
index 190c1b6b5365..5d6d88b600f0 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -202,7 +202,7 @@ mtu_value_is_too_small_test()
mtu_restore $rp2
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
}
@@ -235,7 +235,7 @@ __ttl_value_is_too_small_test()
log_test "TTL value is too small: TTL=$ttl_val"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $h1 ingress protocol ip pref 1 handle 101 flower
}
@@ -299,7 +299,7 @@ __mc_reverse_path_forwarding_test()
log_test "Multicast reverse path forwarding: $desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $rp2 egress protocol $proto pref 1 handle 101 flower
}
@@ -347,7 +347,7 @@ __reject_route_test()
log_test "Reject route: $desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
ip route del unreachable $unreachable
tc filter del dev $h1 ingress protocol $proto pref 1 handle 101 flower
}
@@ -542,7 +542,7 @@ ipv4_lpm_miss_test()
log_test "LPM miss: IPv4"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
vrf_without_routes_destroy
}
@@ -569,7 +569,7 @@ ipv6_lpm_miss_test()
log_test "LPM miss: IPv6"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
vrf_without_routes_destroy
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
index 0bd5ffc218ac..29a672c2270f 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
@@ -45,63 +45,52 @@ source $lib_dir/devlink_lib.sh
h1_create()
{
simple_if_init $h1 192.0.2.1/24
+ defer simple_if_fini $h1 192.0.2.1/24
+
mtu_set $h1 10000
+ defer mtu_restore $h1
ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
-}
-
-h1_destroy()
-{
- ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
-
- mtu_restore $h1
- simple_if_fini $h1 192.0.2.1/24
+ defer ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
}
h2_create()
{
simple_if_init $h2 198.51.100.1/24
+ defer simple_if_fini $h2 198.51.100.1/24
+
mtu_set $h2 10000
+ defer mtu_restore $h2
ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
-}
-
-h2_destroy()
-{
- ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
-
- mtu_restore $h2
- simple_if_fini $h2 198.51.100.1/24
+ defer ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
}
router_create()
{
ip link set dev $rp1 up
+ defer ip link set dev $rp1 down
+
ip link set dev $rp2 up
+ defer ip link set dev $rp2 down
__addr_add_del $rp1 add 192.0.2.2/24
+ defer __addr_add_del $rp1 del 192.0.2.2/24
+
__addr_add_del $rp2 add 198.51.100.2/24
+ defer __addr_add_del $rp2 del 198.51.100.2/24
+
mtu_set $rp1 10000
+ defer mtu_restore $rp1
+
mtu_set $rp2 10000
+ defer mtu_restore $rp2
ip -4 route add blackhole 198.51.100.100
+ defer ip -4 route del blackhole 198.51.100.100
devlink trap set $DEVLINK_DEV trap blackhole_route action trap
-}
-
-router_destroy()
-{
- devlink trap set $DEVLINK_DEV trap blackhole_route action drop
-
- ip -4 route del blackhole 198.51.100.100
-
- mtu_restore $rp2
- mtu_restore $rp1
- __addr_add_del $rp2 del 198.51.100.2/24
- __addr_add_del $rp1 del 192.0.2.2/24
-
- ip link set dev $rp2 down
- ip link set dev $rp1 down
+ defer devlink trap set $DEVLINK_DEV trap blackhole_route action drop
}
setup_prepare()
@@ -114,7 +103,11 @@ setup_prepare()
rp1_mac=$(mac_get $rp1)
+ # Reload to ensure devlink-trap settings are back to default.
+ defer devlink_reload
+
vrf_prepare
+ defer vrf_cleanup
h1_create
h2_create
@@ -122,21 +115,6 @@ setup_prepare()
router_create
}
-cleanup()
-{
- pre_cleanup
-
- router_destroy
-
- h2_destroy
- h1_destroy
-
- vrf_cleanup
-
- # Reload to ensure devlink-trap settings are back to default.
- devlink_reload
-}
-
rate_limits_test()
{
RET=0
@@ -214,7 +192,10 @@ __rate_test()
# by the policer. Make sure measured received rate is about 1000 pps
log_info "=== Tx rate: Highest, Policer rate: 1000 pps ==="
+ defer_scope_push
+
start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac
+ defer stop_traffic $!
sleep 5 # Take measurements when rate is stable
@@ -229,13 +210,16 @@ __rate_test()
check_err $? "Expected non-zero policer drop rate, got 0"
log_info "Measured policer drop rate of $drop_rate pps"
- stop_traffic
+ defer_scope_pop
# Send packets at a rate of 1000 pps and make sure they are not dropped
# by the policer
log_info "=== Tx rate: 1000 pps, Policer rate: 1000 pps ==="
+ defer_scope_push
+
start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -d 1msec
+ defer stop_traffic $!
sleep 5 # Take measurements when rate is stable
@@ -244,7 +228,7 @@ __rate_test()
check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps"
log_info "Measured policer drop rate of $drop_rate pps"
- stop_traffic
+ defer_scope_pop
# Unbind the policer and send packets at highest possible rate. Make
# sure they are not dropped by the policer and that the measured
@@ -253,7 +237,10 @@ __rate_test()
devlink trap group set $DEVLINK_DEV group l3_drops nopolicer
+ defer_scope_push
+
start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac
+ defer stop_traffic $!
rate=$(trap_rate_get)
(( rate > 1000 ))
@@ -265,7 +252,7 @@ __rate_test()
check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps"
log_info "Measured policer drop rate of $drop_rate pps"
- stop_traffic
+ defer_scope_pop
log_test "Trap policer rate"
}
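
The rewrite above leans on the selftest defer framework instead of a hand-written cleanup(): "defer CMD ARGS..." queues CMD to run when the enclosing defer scope unwinds (in reverse registration order, going by how the conversions pair setup and teardown), and defer_scope_push/defer_scope_pop bound a temporary scope so that per-measurement cleanups such as stop_traffic run between the traffic runs rather than only at test exit. Schematically (a sketch of the usage pattern, not of the framework internals):

  ip link set dev $rp1 up
  defer ip link set dev $rp1 down          # undone automatically at cleanup

  defer_scope_push
  start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac
  defer stop_traffic $!                    # tied to the pushed scope
  sleep 5                                  # measure while the rate is stable
  defer_scope_pop                          # stop_traffic executes here
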
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
index e9a82cae8c9a..4ac1dae92d0f 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh
@@ -176,7 +176,7 @@ ecn_decap_test()
log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower
}
@@ -207,7 +207,7 @@ no_matching_tunnel_test()
log_test "$desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh
index 878125041fc3..fce885184404 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh
@@ -176,7 +176,7 @@ ecn_decap_test()
log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
}
@@ -207,7 +207,7 @@ no_matching_tunnel_test()
log_test "$desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
index 5f6eb965cfd1..7aca8e5922cf 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
@@ -183,7 +183,7 @@ ecn_decap_test()
log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower
}
@@ -253,7 +253,7 @@ corrupted_packet_test()
log_test "$desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ip pref 1 handle 101 flower
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh
index f6c16cbb6cf7..4599c331240b 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh
@@ -188,7 +188,7 @@ ecn_decap_test()
log_test "$desc: Inner ECN is not ECT and outer is $ecn_desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
}
@@ -262,7 +262,7 @@ corrupted_packet_test()
log_test "$desc"
- kill $mz_pid && wait $mz_pid &> /dev/null
+ kill_process $mz_pid
tc filter del dev $swp1 egress protocol ipv6 pref 1 handle 101 flower
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
index fee74f215cec..d5b6f2cc9a29 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
@@ -58,65 +58,62 @@ source qos_lib.sh
h1_create()
{
simple_if_init $h1
+ defer simple_if_fini $h1
+
mtu_set $h1 10000
+ defer mtu_restore $h1
vlan_create $h1 111 v$h1 192.0.2.33/28
+ defer vlan_destroy $h1 111
ip link set dev $h1.111 type vlan egress-qos-map 0:1
}
-h1_destroy()
-{
- vlan_destroy $h1 111
-
- mtu_restore $h1
- simple_if_fini $h1
-}
-
h2_create()
{
simple_if_init $h2
+ defer simple_if_fini $h2
+
mtu_set $h2 10000
+ defer mtu_restore $h2
vlan_create $h2 222 v$h2 192.0.2.65/28
+ defer vlan_destroy $h2 222
ip link set dev $h2.222 type vlan egress-qos-map 0:2
}
-h2_destroy()
-{
- vlan_destroy $h2 222
-
- mtu_restore $h2
- simple_if_fini $h2
-}
-
h3_create()
{
simple_if_init $h3
+ defer simple_if_fini $h3
+
mtu_set $h3 10000
+ defer mtu_restore $h3
vlan_create $h3 111 v$h3 192.0.2.34/28
- vlan_create $h3 222 v$h3 192.0.2.66/28
-}
-
-h3_destroy()
-{
- vlan_destroy $h3 222
- vlan_destroy $h3 111
+ defer vlan_destroy $h3 111
- mtu_restore $h3
- simple_if_fini $h3
+ vlan_create $h3 222 v$h3 192.0.2.66/28
+ defer vlan_destroy $h3 222
}
switch_create()
{
ip link set dev $swp1 up
+ defer ip link set dev $swp1 down
+
mtu_set $swp1 10000
+ defer mtu_restore $swp1
ip link set dev $swp2 up
+ defer ip link set dev $swp2 down
+
mtu_set $swp2 10000
+ defer mtu_restore $swp2
# prio n -> TC n, strict scheduling
lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7
+ defer lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0
+
lldptool -T -i $swp3 -V ETS-CFG tsa=$(
)"0:strict,"$(
)"1:strict,"$(
@@ -129,85 +126,90 @@ switch_create()
sleep 1
ip link set dev $swp3 up
+ defer ip link set dev $swp3 down
+
mtu_set $swp3 10000
+ defer mtu_restore $swp3
+
tc qdisc replace dev $swp3 root handle 101: tbf rate 1gbit \
burst 128K limit 1G
+ defer tc qdisc del dev $swp3 root handle 101:
vlan_create $swp1 111
+ defer vlan_destroy $swp1 111
+
vlan_create $swp2 222
+ defer vlan_destroy $swp2 222
+
vlan_create $swp3 111
+ defer vlan_destroy $swp3 111
+
vlan_create $swp3 222
+ defer vlan_destroy $swp3 222
ip link add name br111 type bridge vlan_filtering 0
+ defer ip link del dev br111
ip link set dev br111 addrgenmode none
+
ip link set dev br111 up
+ defer ip link set dev br111 down
+
ip link set dev $swp1.111 master br111
+ defer ip link set dev $swp1.111 nomaster
+
ip link set dev $swp3.111 master br111
+ defer ip link set dev $swp3.111 nomaster
ip link add name br222 type bridge vlan_filtering 0
+ defer ip link del dev br222
ip link set dev br222 addrgenmode none
+
ip link set dev br222 up
+ defer ip link set dev br222 down
+
ip link set dev $swp2.222 master br222
+ defer ip link set dev $swp2.222 nomaster
+
ip link set dev $swp3.222 master br222
+ defer ip link set dev $swp3.222 nomaster
# Make sure that ingress quotas are smaller than egress so that there is
# room for both streams of traffic to be admitted to shared buffer.
devlink_pool_size_thtype_save 0
devlink_pool_size_thtype_set 0 dynamic 10000000
+ defer devlink_pool_size_thtype_restore 0
+
devlink_pool_size_thtype_save 4
devlink_pool_size_thtype_set 4 dynamic 10000000
+ defer devlink_pool_size_thtype_restore 4
devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 6
+ defer devlink_port_pool_th_restore $swp1 0
+
devlink_tc_bind_pool_th_save $swp1 1 ingress
devlink_tc_bind_pool_th_set $swp1 1 ingress 0 6
+ defer devlink_tc_bind_pool_th_restore $swp1 1 ingress
devlink_port_pool_th_save $swp2 0
devlink_port_pool_th_set $swp2 0 6
+ defer devlink_port_pool_th_restore $swp2 0
+
devlink_tc_bind_pool_th_save $swp2 2 ingress
devlink_tc_bind_pool_th_set $swp2 2 ingress 0 6
+ defer devlink_tc_bind_pool_th_restore $swp2 2 ingress
devlink_tc_bind_pool_th_save $swp3 1 egress
devlink_tc_bind_pool_th_set $swp3 1 egress 4 7
+ defer devlink_tc_bind_pool_th_restore $swp3 1 egress
+
devlink_tc_bind_pool_th_save $swp3 2 egress
devlink_tc_bind_pool_th_set $swp3 2 egress 4 7
+ defer devlink_tc_bind_pool_th_restore $swp3 2 egress
+
devlink_port_pool_th_save $swp3 4
devlink_port_pool_th_set $swp3 4 7
-}
-
-switch_destroy()
-{
- devlink_port_pool_th_restore $swp3 4
- devlink_tc_bind_pool_th_restore $swp3 2 egress
- devlink_tc_bind_pool_th_restore $swp3 1 egress
-
- devlink_tc_bind_pool_th_restore $swp2 2 ingress
- devlink_port_pool_th_restore $swp2 0
-
- devlink_tc_bind_pool_th_restore $swp1 1 ingress
- devlink_port_pool_th_restore $swp1 0
-
- devlink_pool_size_thtype_restore 4
- devlink_pool_size_thtype_restore 0
-
- ip link del dev br222
- ip link del dev br111
-
- vlan_destroy $swp3 222
- vlan_destroy $swp3 111
- vlan_destroy $swp2 222
- vlan_destroy $swp1 111
-
- tc qdisc del dev $swp3 root handle 101:
- mtu_restore $swp3
- ip link set dev $swp3 down
- lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0
-
- mtu_restore $swp2
- ip link set dev $swp2 down
-
- mtu_restore $swp1
- ip link set dev $swp1 down
+ defer devlink_port_pool_th_restore $swp3 4
}
setup_prepare()
@@ -224,6 +226,7 @@ setup_prepare()
h3mac=$(mac_get $h3)
vrf_prepare
+ defer vrf_cleanup
h1_create
h2_create
@@ -231,18 +234,6 @@ setup_prepare()
switch_create
}
-cleanup()
-{
- pre_cleanup
-
- switch_destroy
- h3_destroy
- h2_destroy
- h1_destroy
-
- vrf_cleanup
-}
-
ping_ipv4()
{
ping_test $h1 192.0.2.34 " from H1"
@@ -261,21 +252,38 @@ rel()
"
}
+__run_hi_measure_rate()
+{
+ local what=$1; shift
+ local -a uc_rate
+
+ start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
+ defer stop_traffic $!
+
+ uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_2 "$what"))
+ check_err $? "Could not get high enough $what ingress rate"
+
+ echo ${uc_rate[@]}
+}
+
+run_hi_measure_rate()
+{
+ in_defer_scope __run_hi_measure_rate "$@"
+}
+
test_ets_strict()
{
RET=0
# Run high-prio traffic on its own.
- start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
local -a rate_2
- rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2"))
- check_err $? "Could not get high enough prio-2 ingress rate"
+ rate_2=($(run_hi_measure_rate "prio 2"))
local rate_2_in=${rate_2[0]}
local rate_2_eg=${rate_2[1]}
- stop_traffic # $h2.222
# Start low-prio stream.
start_traffic $h1.111 192.0.2.33 192.0.2.34 $h3mac
+ defer stop_traffic $!
local -a rate_1
rate_1=($(measure_rate $swp1 $h3 rx_octets_prio_1 "prio 1"))
@@ -290,14 +298,9 @@ test_ets_strict()
check_err $(bc <<< "$rel21 > 105")
# Start the high-prio stream--now both streams run.
- start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
- rate_3=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2 w/ 1"))
- check_err $? "Could not get high enough prio-2 ingress rate with prio-1"
+ rate_3=($(run_hi_measure_rate "prio 2+1"))
local rate_3_in=${rate_3[0]}
local rate_3_eg=${rate_3[1]}
- stop_traffic # $h2.222
-
- stop_traffic # $h1.111
# High-prio should have about the same throughput whether or not
# low-prio is in the system.
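
run_hi_measure_rate() goes through in_defer_scope because its caller captures the output with $(...): command substitution runs the function in a subshell, and the wrapper ensures the defer stop_traffic registered inside still fires when the helper returns instead of being lost with the subshell or postponed to test exit. The shape of the pattern, sketched under that assumption:

  __measure()
  {
          start_traffic ...
          defer stop_traffic $!      # must run as soon as the measurement is done

          measure_rate ...           # echoed numbers are the "return value"
  }

  measure()
  {
          in_defer_scope __measure "$@"
  }

  rate=($(measure "prio 2"))         # traffic is already stopped at this point
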
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
index 5ac4f795e333..2b5d2c2751d5 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
@@ -69,127 +69,103 @@ mlxsw_only_on_spectrum 2+ || exit
h1_create()
{
simple_if_init $h1
+ defer simple_if_fini $h1
vlan_create $h1 111 v$h1 192.0.2.33/28
+ defer vlan_destroy $h1 111
ip link set dev $h1.111 type vlan egress-qos-map 0:1
}
-h1_destroy()
-{
- vlan_destroy $h1 111
-
- simple_if_fini $h1
-}
-
h2_create()
{
simple_if_init $h2
+ defer simple_if_fini $h2
vlan_create $h2 111 v$h2 192.0.2.34/28
-}
-
-h2_destroy()
-{
- vlan_destroy $h2 111
-
- simple_if_fini $h2
+ defer vlan_destroy $h2 111
}
switch_create()
{
# pools
# -----
+ # devlink_pool_size_thtype_restore needs to be done first so that we can
+ # reset the various limits to values that are only valid for the
+ # original static / dynamic setting.
devlink_pool_size_thtype_save 1
- devlink_pool_size_thtype_save 6
-
- devlink_port_pool_th_save $swp1 1
- devlink_port_pool_th_save $swp2 6
-
- devlink_tc_bind_pool_th_save $swp1 1 ingress
- devlink_tc_bind_pool_th_save $swp2 1 egress
-
devlink_pool_size_thtype_set 1 dynamic $MAX_POOL_SIZE
+ defer_prio devlink_pool_size_thtype_restore 1
+
+ devlink_pool_size_thtype_save 6
devlink_pool_size_thtype_set 6 static $MAX_POOL_SIZE
+ defer_prio devlink_pool_size_thtype_restore 6
# $swp1
# -----
ip link set dev $swp1 up
+ defer ip link set dev $swp1 down
+
vlan_create $swp1 111
+ defer vlan_destroy $swp1 111
ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1
+ devlink_port_pool_th_save $swp1 1
devlink_port_pool_th_set $swp1 1 16
+ defer devlink_tc_bind_pool_th_restore $swp1 1 ingress
+
+ devlink_tc_bind_pool_th_save $swp1 1 ingress
devlink_tc_bind_pool_th_set $swp1 1 ingress 1 16
+ defer devlink_port_pool_th_restore $swp1 1
tc qdisc replace dev $swp1 root handle 1: \
ets bands 8 strict 8 priomap 7 6
+ defer tc qdisc del dev $swp1 root
+
dcb buffer set dev $swp1 prio-buffer all:0 1:1
+ defer dcb buffer set dev $swp1 prio-buffer all:0
# $swp2
# -----
ip link set dev $swp2 up
+ defer ip link set dev $swp2 down
+
vlan_create $swp2 111
+ defer vlan_destroy $swp2 111
ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1
+ devlink_port_pool_th_save $swp2 6
devlink_port_pool_th_set $swp2 6 $MAX_POOL_SIZE
+ defer devlink_tc_bind_pool_th_restore $swp2 1 egress
+
+ devlink_tc_bind_pool_th_save $swp2 1 egress
devlink_tc_bind_pool_th_set $swp2 1 egress 6 $MAX_POOL_SIZE
+ defer devlink_port_pool_th_restore $swp2 6
tc qdisc replace dev $swp2 root handle 1: tbf rate $SHAPER_RATE \
burst 128K limit 500M
+ defer tc qdisc del dev $swp2 root
+
tc qdisc replace dev $swp2 parent 1:1 handle 11: \
ets bands 8 strict 8 priomap 7 6
+ defer tc qdisc del dev $swp2 parent 1:1 handle 11:
# bridge
# ------
ip link add name br1 type bridge vlan_filtering 0
+ defer ip link del dev br1
+
ip link set dev $swp1.111 master br1
+ defer ip link set dev $swp1.111 nomaster
+
ip link set dev br1 up
+ defer ip link set dev br1 down
ip link set dev $swp2.111 master br1
-}
-
-switch_destroy()
-{
- # Do this first so that we can reset the limits to values that are only
- # valid for the original static / dynamic setting.
- devlink_pool_size_thtype_restore 6
- devlink_pool_size_thtype_restore 1
-
- # bridge
- # ------
-
- ip link set dev $swp2.111 nomaster
-
- ip link set dev br1 down
- ip link set dev $swp1.111 nomaster
- ip link del dev br1
-
- # $swp2
- # -----
-
- tc qdisc del dev $swp2 parent 1:1 handle 11:
- tc qdisc del dev $swp2 root
-
- devlink_tc_bind_pool_th_restore $swp2 1 egress
- devlink_port_pool_th_restore $swp2 6
-
- vlan_destroy $swp2 111
- ip link set dev $swp2 down
-
- # $swp1
- # -----
-
- dcb buffer set dev $swp1 prio-buffer all:0
- tc qdisc del dev $swp1 root
-
- devlink_tc_bind_pool_th_restore $swp1 1 ingress
- devlink_port_pool_th_restore $swp1 1
-
- vlan_destroy $swp1 111
- ip link set dev $swp1 down
+ defer ip link set dev $swp2.111 nomaster
}
setup_prepare()
@@ -203,23 +179,13 @@ setup_prepare()
h2mac=$(mac_get $h2)
vrf_prepare
+ defer vrf_cleanup
h1_create
h2_create
switch_create
}
-cleanup()
-{
- pre_cleanup
-
- switch_destroy
- h2_destroy
- h1_destroy
-
- vrf_cleanup
-}
-
ping_ipv4()
{
ping_test $h1 192.0.2.34 " h1->h2"
@@ -251,6 +217,7 @@ max_descriptors()
log_info "Send many small packets, packet size = $pktsize bytes"
start_traffic_pktsize $pktsize $h1.111 192.0.2.33 192.0.2.34 $h2mac
+ defer stop_traffic $!
# Sleep to wait for congestion.
sleep 5
@@ -268,9 +235,6 @@ max_descriptors()
check_err $(bc <<< "$perc_used < $exp_perc_used") \
"Expected > $exp_perc_used% of descriptors, handle $perc_used%"
- stop_traffic
- sleep 1
-
log_test "Maximum descriptors usage. The percentage used is $perc_used%"
}
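
One detail worth noting: the pool size/threshold-type restores are queued with defer_prio rather than plain defer. Per the comment added in switch_create(), they must run before the other restores, since the remaining limits are only valid under the original static/dynamic setting; defer_prio is presumably a variant of defer whose entries execute ahead of the regular deferred commands at cleanup. Sketched:

  devlink_pool_size_thtype_save 6
  devlink_pool_size_thtype_set 6 static $MAX_POOL_SIZE
  defer_prio devlink_pool_size_thtype_restore 6   # runs before the plain defers

  devlink_port_pool_th_save $swp2 6
  devlink_port_pool_th_set $swp2 6 $MAX_POOL_SIZE
  defer devlink_port_pool_th_restore $swp2 6      # saved value is only valid once
                                                  # the thtype has been restored
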
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 6d892de43fa8..cd4a5c21360c 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -73,122 +73,114 @@ source qos_lib.sh
h1_create()
{
simple_if_init $h1 192.0.2.65/28
- mtu_set $h1 10000
-}
+ defer simple_if_fini $h1 192.0.2.65/28
-h1_destroy()
-{
- mtu_restore $h1
- simple_if_fini $h1 192.0.2.65/28
+ mtu_set $h1 10000
+ defer mtu_restore $h1
}
h2_create()
{
simple_if_init $h2
+ defer simple_if_fini $h2
+
mtu_set $h2 10000
+ defer mtu_restore $h2
vlan_create $h2 111 v$h2 192.0.2.129/28
+ defer vlan_destroy $h2 111
ip link set dev $h2.111 type vlan egress-qos-map 0:1
}
-h2_destroy()
-{
- vlan_destroy $h2 111
-
- mtu_restore $h2
- simple_if_fini $h2
-}
-
h3_create()
{
simple_if_init $h3 192.0.2.66/28
+ defer simple_if_fini $h3 192.0.2.66/28
+
mtu_set $h3 10000
+ defer mtu_restore $h3
vlan_create $h3 111 v$h3 192.0.2.130/28
-}
-
-h3_destroy()
-{
- vlan_destroy $h3 111
-
- mtu_restore $h3
- simple_if_fini $h3 192.0.2.66/28
+ defer vlan_destroy $h3 111
}
switch_create()
{
ip link set dev $swp1 up
+ defer ip link set dev $swp1 down
+
mtu_set $swp1 10000
+ defer mtu_restore $swp1
ip link set dev $swp2 up
+ defer ip link set dev $swp2 down
+
mtu_set $swp2 10000
+ defer mtu_restore $swp2
ip link set dev $swp3 up
+ defer ip link set dev $swp3 down
+
mtu_set $swp3 10000
+ defer mtu_restore $swp3
vlan_create $swp2 111
+ defer vlan_destroy $swp2 111
+
vlan_create $swp3 111
+ defer vlan_destroy $swp3 111
tc qdisc replace dev $swp3 root handle 3: tbf rate 1gbit \
burst 128K limit 1G
+ defer tc qdisc del dev $swp3 root handle 3:
+
tc qdisc replace dev $swp3 parent 3:3 handle 33: \
prio bands 8 priomap 7 7 7 7 7 7 7 7
+ defer tc qdisc del dev $swp3 parent 3:3 handle 33:
ip link add name br1 type bridge vlan_filtering 0
+ defer ip link del dev br1
ip link set dev br1 addrgenmode none
ip link set dev br1 up
+
ip link set dev $swp1 master br1
+ defer ip link set dev $swp1 nomaster
+
ip link set dev $swp3 master br1
+ defer ip link set dev $swp3 nomaster
ip link add name br111 type bridge vlan_filtering 0
+ defer ip link del dev br111
ip link set dev br111 addrgenmode none
ip link set dev br111 up
+
ip link set dev $swp2.111 master br111
+ defer ip link set dev $swp2.111 nomaster
+
ip link set dev $swp3.111 master br111
+ defer ip link set dev $swp3.111 nomaster
# Make sure that ingress quotas are smaller than egress so that there is
# room for both streams of traffic to be admitted to shared buffer.
devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 5
+ defer devlink_port_pool_th_restore $swp1 0
+
devlink_tc_bind_pool_th_save $swp1 0 ingress
devlink_tc_bind_pool_th_set $swp1 0 ingress 0 5
+ defer devlink_tc_bind_pool_th_restore $swp1 0 ingress
devlink_port_pool_th_save $swp2 0
devlink_port_pool_th_set $swp2 0 5
+ defer devlink_port_pool_th_restore $swp2 0
+
devlink_tc_bind_pool_th_save $swp2 1 ingress
devlink_tc_bind_pool_th_set $swp2 1 ingress 0 5
+ defer devlink_tc_bind_pool_th_restore $swp2 1 ingress
devlink_port_pool_th_save $swp3 4
devlink_port_pool_th_set $swp3 4 12
-}
-
-switch_destroy()
-{
- devlink_port_pool_th_restore $swp3 4
-
- devlink_tc_bind_pool_th_restore $swp2 1 ingress
- devlink_port_pool_th_restore $swp2 0
-
- devlink_tc_bind_pool_th_restore $swp1 0 ingress
- devlink_port_pool_th_restore $swp1 0
-
- ip link del dev br111
- ip link del dev br1
-
- tc qdisc del dev $swp3 parent 3:3 handle 33:
- tc qdisc del dev $swp3 root handle 3:
-
- vlan_destroy $swp3 111
- vlan_destroy $swp2 111
-
- mtu_restore $swp3
- ip link set dev $swp3 down
-
- mtu_restore $swp2
- ip link set dev $swp2 down
-
- mtu_restore $swp1
- ip link set dev $swp1 down
+ defer devlink_port_pool_th_restore $swp3 4
}
setup_prepare()
@@ -205,6 +197,7 @@ setup_prepare()
h3mac=$(mac_get $h3)
vrf_prepare
+ defer vrf_cleanup
h1_create
h2_create
@@ -212,45 +205,45 @@ setup_prepare()
switch_create
}
-cleanup()
+ping_ipv4()
{
- pre_cleanup
+ ping_test $h2 192.0.2.130
+}
- switch_destroy
- h3_destroy
- h2_destroy
- h1_destroy
+__run_uc_measure_rate()
+{
+ local what=$1; shift
+ local -a uc_rate
+
+ start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
+ defer stop_traffic $!
+
+ uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "$what"))
+ check_err $? "Could not get high enough $what ingress rate"
- vrf_cleanup
+ echo ${uc_rate[@]}
}
-ping_ipv4()
+run_uc_measure_rate()
{
- ping_test $h2 192.0.2.130
+ in_defer_scope __run_uc_measure_rate "$@"
}
test_mc_aware()
{
RET=0
- local -a uc_rate
- start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
- uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC-only"))
- check_err $? "Could not get high enough UC-only ingress rate"
- stop_traffic
+ local -a uc_rate=($(run_uc_measure_rate "UC-only"))
local ucth1=${uc_rate[1]}
start_traffic $h1 192.0.2.65 bc bc
+ defer stop_traffic $!
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0)
- local -a uc_rate_2
- start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
- uc_rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC+MC"))
- check_err $? "Could not get high enough UC+MC ingress rate"
- stop_traffic
+ local -a uc_rate_2=($(run_uc_measure_rate "UC+MC"))
local ucth2=${uc_rate_2[1]}
local d1=$(date +%s)
@@ -272,8 +265,6 @@ test_mc_aware()
local mc_ir=$(rate $u0 $u1 $interval)
local mc_er=$(rate $t0 $t1 $interval)
- stop_traffic
-
log_test "UC performance under MC overload"
echo "UC-only throughput $(humanize $ucth1)"
@@ -297,6 +288,7 @@ test_uc_aware()
RET=0
start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
+ defer stop_traffic $!
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
@@ -326,8 +318,6 @@ test_uc_aware()
((attempts == passes))
check_err $?
- stop_traffic
-
log_test "MC performance under UC overload"
echo " ingress UC throughput $(humanize ${uc_ir})"
echo " egress UC throughput $(humanize ${uc_er})"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 893a693ad805..45a569618424 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -186,10 +186,7 @@ bridge_vlan_flags_test()
# If we did not handle references correctly, then this should produce a
# trace
- devlink dev reload "$DEVLINK_DEV"
-
- # Allow netdevices to be re-created following the reload
- sleep 20
+ devlink_reload
log_test "bridge vlan flags"
}
@@ -923,12 +920,9 @@ devlink_reload_test()
# devlink reload can be performed without errors
RET=0
- devlink dev reload "$DEVLINK_DEV"
- check_err $? "devlink reload failed"
+ devlink_reload
log_test "devlink reload - last test"
-
- sleep 20
}
trap cleanup EXIT
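
Both hunks drop the open-coded "devlink dev reload" plus a fixed 20-second sleep in favour of the shared devlink_reload helper, which performs the reload and waits for it to settle. The helper itself comes from devlink_lib.sh and is not shown here; a rough sketch of the behaviour being relied on (the settle step is an assumption):

  devlink_reload()
  {
          devlink dev reload "$DEVLINK_DEV" &> /dev/null
          check_err $? "Failed to reload"

          # Let udev finish re-creating the port netdevs before the next
          # test touches them, instead of sleeping a hard-coded interval.
          udevadm settle
  }
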
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
index 139175fd03e7..4aaceb6b2b60 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
@@ -21,6 +21,7 @@ switch_create()
# Create a bottleneck so that the DWRR process can kick in.
tc qdisc replace dev $swp2 root handle 3: tbf rate 1gbit \
burst 128K limit 1G
+ defer tc qdisc del dev $swp2 root handle 3:
ets_switch_create
@@ -30,16 +31,27 @@ switch_create()
# for the DWRR process.
devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 12
+ defer devlink_port_pool_th_restore $swp1 0
+
devlink_tc_bind_pool_th_save $swp1 0 ingress
devlink_tc_bind_pool_th_set $swp1 0 ingress 0 12
+ defer devlink_tc_bind_pool_th_restore $swp1 0 ingress
+
devlink_port_pool_th_save $swp2 4
devlink_port_pool_th_set $swp2 4 12
+ defer devlink_port_pool_th_restore $swp2 4
+
devlink_tc_bind_pool_th_save $swp2 7 egress
devlink_tc_bind_pool_th_set $swp2 7 egress 4 5
+ defer devlink_tc_bind_pool_th_restore $swp2 7 egress
+
devlink_tc_bind_pool_th_save $swp2 6 egress
devlink_tc_bind_pool_th_set $swp2 6 egress 4 5
+ defer devlink_tc_bind_pool_th_restore $swp2 6 egress
+
devlink_tc_bind_pool_th_save $swp2 5 egress
devlink_tc_bind_pool_th_set $swp2 5 egress 4 5
+ defer devlink_tc_bind_pool_th_restore $swp2 5 egress
# Note: sch_ets_core.sh uses VLAN ingress-qos-map to assign packet
# priorities at $swp1 based on their 802.1p headers. ingress-qos-map is
@@ -47,20 +59,6 @@ switch_create()
# 1:1, which is the mapping currently hard-coded by the driver.
}
-switch_destroy()
-{
- devlink_tc_bind_pool_th_restore $swp2 5 egress
- devlink_tc_bind_pool_th_restore $swp2 6 egress
- devlink_tc_bind_pool_th_restore $swp2 7 egress
- devlink_port_pool_th_restore $swp2 4
- devlink_tc_bind_pool_th_restore $swp1 0 ingress
- devlink_port_pool_th_restore $swp1 0
-
- ets_switch_destroy
-
- tc qdisc del dev $swp2 root handle 3:
-}
-
# Callback from sch_ets_tests.sh
collect_stats()
{
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index 299e06a5808c..537d6baa77b7 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -75,6 +75,18 @@ source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
source mlxsw_lib.sh
+stop_traffic_sleep()
+{
+ local pid=$1; shift
+
+ # Issuing a kill still leaves a bunch of packets lingering in the
+ # buffers. This traffic then arrives at the point where a follow-up test
+ # is already running, and can confuse the test. Therefore sleep after
+ # stopping traffic to flush any leftover packets.
+ stop_traffic "$pid"
+ sleep 1
+}
+
ipaddr()
{
local host=$1; shift
@@ -89,39 +101,31 @@ host_create()
local host=$1; shift
simple_if_init $dev
+ defer simple_if_fini $dev
+
mtu_set $dev 10000
+ defer mtu_restore $dev
vlan_create $dev 10 v$dev $(ipaddr $host 10)/28
+ defer vlan_destroy $dev 10
ip link set dev $dev.10 type vlan egress 0:0
vlan_create $dev 11 v$dev $(ipaddr $host 11)/28
+ defer vlan_destroy $dev 11
ip link set dev $dev.11 type vlan egress 0:1
}
-host_destroy()
-{
- local dev=$1; shift
-
- vlan_destroy $dev 11
- vlan_destroy $dev 10
- mtu_restore $dev
- simple_if_fini $dev
-}
-
h1_create()
{
host_create $h1 1
}
-h1_destroy()
-{
- host_destroy $h1
-}
-
h2_create()
{
host_create $h2 2
+
tc qdisc add dev $h2 clsact
+ defer tc qdisc del dev $h2 clsact
# Some of the tests in this suite use multicast traffic. As this traffic
# enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus
@@ -137,15 +141,9 @@ h2_create()
# Prevent this by adding a shaper which limits the traffic in $h2 to
# 1Gbps.
- tc qdisc replace dev $h2 root handle 10: tbf rate 1gbit \
+ tc qdisc replace dev $h2 root handle 10: tbf rate 200mbit \
burst 128K limit 1G
-}
-
-h2_destroy()
-{
- tc qdisc del dev $h2 root handle 10:
- tc qdisc del dev $h2 clsact
- host_destroy $h2
+ defer tc qdisc del dev $h2 root handle 10:
}
h3_create()
@@ -153,40 +151,54 @@ h3_create()
host_create $h3 3
}
-h3_destroy()
-{
- host_destroy $h3
-}
-
switch_create()
{
local intf
local vlan
ip link add dev br1_10 type bridge
+ defer ip link del dev br1_10
+
ip link add dev br1_11 type bridge
+ defer ip link del dev br1_11
ip link add dev br2_10 type bridge
+ defer ip link del dev br2_10
+
ip link add dev br2_11 type bridge
+ defer ip link del dev br2_11
for intf in $swp1 $swp2 $swp3 $swp4 $swp5; do
ip link set dev $intf up
+ defer ip link set dev $intf down
+
mtu_set $intf 10000
+ defer mtu_restore $intf
done
for intf in $swp1 $swp4; do
for vlan in 10 11; do
vlan_create $intf $vlan
+ defer vlan_destroy $intf $vlan
+
ip link set dev $intf.$vlan master br1_$vlan
+ defer ip link set dev $intf.$vlan nomaster
+
ip link set dev $intf.$vlan up
+			defer ip link set dev $intf.$vlan down
done
done
for intf in $swp2 $swp3 $swp5; do
for vlan in 10 11; do
vlan_create $intf $vlan
+ defer vlan_destroy $intf $vlan
+
ip link set dev $intf.$vlan master br2_$vlan
+ defer ip link set dev $intf.$vlan nomaster
+
ip link set dev $intf.$vlan up
+			defer ip link set dev $intf.$vlan down
done
done
@@ -199,51 +211,27 @@ switch_create()
done
for intf in $swp3 $swp4; do
- tc qdisc replace dev $intf root handle 1: tbf rate 1gbit \
+ tc qdisc replace dev $intf root handle 1: tbf rate 200mbit \
burst 128K limit 1G
+ defer tc qdisc del dev $intf root handle 1:
done
ip link set dev br1_10 up
+ defer ip link set dev br1_10 down
+
ip link set dev br1_11 up
+ defer ip link set dev br1_11 down
+
ip link set dev br2_10 up
+ defer ip link set dev br2_10 down
+
ip link set dev br2_11 up
+ defer ip link set dev br2_11 down
local size=$(devlink_pool_size_thtype 0 | cut -d' ' -f 1)
devlink_port_pool_th_save $swp3 8
devlink_port_pool_th_set $swp3 8 $size
-}
-
-switch_destroy()
-{
- local intf
- local vlan
-
- devlink_port_pool_th_restore $swp3 8
-
- ip link set dev br2_11 down
- ip link set dev br2_10 down
- ip link set dev br1_11 down
- ip link set dev br1_10 down
-
- for intf in $swp4 $swp3; do
- tc qdisc del dev $intf root handle 1:
- done
-
- for intf in $swp5 $swp3 $swp2 $swp4 $swp1; do
- for vlan in 11 10; do
- ip link set dev $intf.$vlan down
- ip link set dev $intf.$vlan nomaster
- vlan_destroy $intf $vlan
- done
-
- mtu_restore $intf
- ip link set dev $intf down
- done
-
- ip link del dev br2_11
- ip link del dev br2_10
- ip link del dev br1_11
- ip link del dev br1_10
+ defer devlink_port_pool_th_restore $swp3 8
}
setup_prepare()
@@ -263,6 +251,7 @@ setup_prepare()
h3_mac=$(mac_get $h3)
vrf_prepare
+ defer vrf_cleanup
h1_create
h2_create
@@ -270,18 +259,6 @@ setup_prepare()
switch_create
}
-cleanup()
-{
- pre_cleanup
-
- switch_destroy
- h3_destroy
- h2_destroy
- h1_destroy
-
- vrf_cleanup
-}
-
ping_ipv4()
{
ping_test $h1.10 $(ipaddr 3 10) " from host 1, vlan 10"
@@ -372,6 +349,7 @@ build_backlog()
local i=0
while :; do
+ sleep 1
local cur=$(busywait 1100 until_counter_is "> $cur" \
get_qdisc_backlog $vlan)
local diff=$((size - cur))
@@ -449,6 +427,7 @@ __do_ecn_test()
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
$h3_mac tos=0x01
+ defer stop_traffic_sleep $!
sleep 1
ecn_test_common "$name" "$get_nmarked" $vlan $limit
@@ -460,9 +439,6 @@ __do_ecn_test()
build_backlog $vlan $((2 * limit)) udp >/dev/null
check_fail $? "UDP traffic went into backlog instead of being early-dropped"
log_test "TC $((vlan - 10)): $name backlog > limit: UDP early-dropped"
-
- stop_traffic
- sleep 1
}
do_ecn_test()
@@ -470,7 +446,8 @@ do_ecn_test()
local vlan=$1; shift
local limit=$1; shift
- __do_ecn_test get_nmarked "$vlan" "$limit"
+ in_defer_scope \
+ __do_ecn_test get_nmarked "$vlan" "$limit"
}
do_ecn_test_perband()
@@ -479,10 +456,11 @@ do_ecn_test_perband()
local limit=$1; shift
mlxsw_only_on_spectrum 3+ || return
- __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN"
+ in_defer_scope \
+ __do_ecn_test get_qdisc_nmarked "$vlan" "$limit" "per-band ECN"
}
-do_ecn_nodrop_test()
+__do_ecn_nodrop_test()
{
local vlan=$1; shift
local limit=$1; shift
@@ -490,6 +468,7 @@ do_ecn_nodrop_test()
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
$h3_mac tos=0x01
+ defer stop_traffic_sleep $!
sleep 1
ecn_test_common "$name" get_nmarked $vlan $limit
@@ -501,12 +480,15 @@ do_ecn_nodrop_test()
build_backlog $vlan $((2 * limit)) udp >/dev/null
check_err $? "UDP traffic was early-dropped instead of getting into backlog"
log_test "TC $((vlan - 10)): $name backlog > limit: UDP not dropped"
+}
- stop_traffic
- sleep 1
+do_ecn_nodrop_test()
+{
+ in_defer_scope \
+ __do_ecn_nodrop_test "$@"
}
-do_red_test()
+__do_red_test()
{
local vlan=$1; shift
local limit=$1; shift
@@ -517,6 +499,7 @@ do_red_test()
# is above limit.
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
$h3_mac tos=0x01
+ defer stop_traffic_sleep $!
# Pushing below the queue limit should work.
RET=0
@@ -532,17 +515,21 @@ do_red_test()
check_fail $? "Traffic went into backlog instead of being early-dropped"
pct=$(check_marking get_nmarked $vlan "== 0")
check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+ backlog=$(get_qdisc_backlog $vlan)
local diff=$((limit - backlog))
pct=$((100 * diff / limit))
- ((-10 <= pct && pct <= 10))
- check_err $? "backlog $backlog / $limit expected <= 10% distance"
+ ((-15 <= pct && pct <= 15))
+ check_err $? "backlog $backlog / $limit expected <= 15% distance"
log_test "TC $((vlan - 10)): RED backlog > limit"
+}
- stop_traffic
- sleep 1
+do_red_test()
+{
+ in_defer_scope \
+ __do_red_test "$@"
}
-do_mc_backlog_test()
+__do_mc_backlog_test()
{
local vlan=$1; shift
local limit=$1; shift
@@ -552,7 +539,10 @@ do_mc_backlog_test()
RET=0
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) bc
+ defer stop_traffic_sleep $!
+
start_tcp_traffic $h2.$vlan $(ipaddr 2 $vlan) $(ipaddr 3 $vlan) bc
+ defer stop_traffic_sleep $!
qbl=$(busywait 5000 until_counter_is ">= 500000" \
get_qdisc_backlog $vlan)
@@ -565,13 +555,16 @@ do_mc_backlog_test()
get_mc_transmit_queue $vlan)
check_err $? "MC backlog reported by qdisc not visible in ethtool"
- stop_traffic
- stop_traffic
-
log_test "TC $((vlan - 10)): Qdisc reports MC backlog"
}
-do_mark_test()
+do_mc_backlog_test()
+{
+ in_defer_scope \
+ __do_mc_backlog_test "$@"
+}
+
+__do_mark_test()
{
local vlan=$1; shift
local limit=$1; shift
@@ -586,6 +579,7 @@ do_mark_test()
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
$h3_mac tos=0x01
+ defer stop_traffic_sleep $!
# Create a bit of a backlog and observe no mirroring due to marks.
qevent_rule_install_$subtest
@@ -600,7 +594,7 @@ do_mark_test()
# Above limit, everything should be mirrored, we should see lots of
# packets.
build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01 >/dev/null
- busywait_for_counter 1100 +10000 \
+ busywait_for_counter 1100 +2500 \
$fetch_counter > /dev/null
check_err_fail "$should_fail" $? "ECN-marked packets $subtest'd"
@@ -615,12 +609,15 @@ do_mark_test()
else
log_test "TC $((vlan - 10)): marked packets $subtest'd"
fi
+}
- stop_traffic
- sleep 1
+do_mark_test()
+{
+ in_defer_scope \
+ __do_mark_test "$@"
}
-do_drop_test()
+__do_drop_test()
{
local vlan=$1; shift
local limit=$1; shift
@@ -635,6 +632,7 @@ do_drop_test()
RET=0
start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac
+ defer stop_traffic_sleep $!
# Create a bit of a backlog and observe no mirroring due to drops.
qevent_rule_install_$subtest
@@ -651,25 +649,30 @@ do_drop_test()
build_backlog $vlan $((3 * limit / 2)) udp >/dev/null
base=$($fetch_counter)
- send_packets $vlan udp 11
+ send_packets $vlan udp 100
- now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter)
- check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen"
+ now=$(busywait 1100 until_counter_is ">= $((base + 95))" $fetch_counter)
+ check_err $? "${trigger}ped packets not observed: 100 expected, $((now - base)) seen"
# When no extra traffic is injected, there should be no mirroring.
- busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
+ busywait 1100 until_counter_is ">= $((base + 110))" \
+ $fetch_counter >/dev/null
check_fail $? "Spurious packets observed"
# When the rule is uninstalled, there should be no mirroring.
qevent_rule_uninstall_$subtest
- send_packets $vlan udp 11
- busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
- check_fail $? "Spurious packets observed after uninstall"
+ send_packets $vlan udp 100
+ now=$(busywait 1100 until_counter_is ">= $((base + 110))" \
+ $fetch_counter)
+ check_fail $? "$((now - base)) spurious packets observed after uninstall"
log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd"
+}
- stop_traffic
- sleep 1
+do_drop_test()
+{
+ in_defer_scope \
+ __do_drop_test "$@"
}
qevent_rule_install_mirror()
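
Beyond the setup conversion, every test body in sch_red_core.sh is renamed to a __do_* worker and re-exported as a thin do_* wrapper running through in_defer_scope, so that the "defer stop_traffic_sleep $!" registered right after traffic is started fires as soon as the individual test returns, replacing the trailing stop_traffic / sleep 1 pairs. The converted skeleton (names are placeholders) looks like:

  __do_some_red_test()
  {
          start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac
          defer stop_traffic_sleep $!   # stop mausezahn and let buffers drain

          build_backlog $vlan $limit tcp >/dev/null
          check_err $? "..."
          log_test "..."
  }

  do_some_red_test()
  {
          in_defer_scope __do_some_red_test "$@"
  }
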
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
index 8ecddafa79b3..8902a115d9cd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -20,8 +20,8 @@ source sch_red_core.sh
# $BACKLOG2 are far enough not to overlap, so that we can assume that if we do
# see (do not see) marking, it is actually due to the configuration of that one
# TC, and not due to configuration of the other TC leaking over.
-BACKLOG1=200000
-BACKLOG2=500000
+BACKLOG1=400000
+BACKLOG2=1000000
install_root_qdisc()
{
@@ -35,7 +35,7 @@ install_qdisc_tc0()
tc qdisc add dev $swp3 parent 10:8 handle 108: red \
limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \
- probability 1.0 avpkt 8000 burst 38 "${args[@]}"
+ probability 1.0 avpkt 8000 burst 51 "${args[@]}"
}
install_qdisc_tc1()
@@ -44,7 +44,7 @@ install_qdisc_tc1()
tc qdisc add dev $swp3 parent 10:7 handle 107: red \
limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \
- probability 1.0 avpkt 8000 burst 63 "${args[@]}"
+ probability 1.0 avpkt 8000 burst 126 "${args[@]}"
}
install_qdisc()
@@ -80,36 +80,34 @@ uninstall_qdisc()
ecn_test()
{
install_qdisc ecn
+ defer uninstall_qdisc
do_ecn_test 10 $BACKLOG1
do_ecn_test 11 $BACKLOG2
-
- uninstall_qdisc
}
ecn_test_perband()
{
install_qdisc ecn
+ defer uninstall_qdisc
do_ecn_test_perband 10 $BACKLOG1
do_ecn_test_perband 11 $BACKLOG2
-
- uninstall_qdisc
}
ecn_nodrop_test()
{
install_qdisc ecn nodrop
+ defer uninstall_qdisc
do_ecn_nodrop_test 10 $BACKLOG1
do_ecn_nodrop_test 11 $BACKLOG2
-
- uninstall_qdisc
}
red_test()
{
install_qdisc
+ defer uninstall_qdisc
# Make sure that we get the non-zero value if there is any.
local cur=$(busywait 1100 until_counter_is "> 0" \
@@ -120,50 +118,44 @@ red_test()
do_red_test 10 $BACKLOG1
do_red_test 11 $BACKLOG2
-
- uninstall_qdisc
}
mc_backlog_test()
{
install_qdisc
+ defer uninstall_qdisc
# Note that the backlog numbers here do not correspond to RED
# configuration, but are arbitrary.
do_mc_backlog_test 10 $BACKLOG1
do_mc_backlog_test 11 $BACKLOG2
-
- uninstall_qdisc
}
red_mirror_test()
{
install_qdisc qevent early_drop block 10
+ defer uninstall_qdisc
do_drop_mirror_test 10 $BACKLOG1 early_drop
do_drop_mirror_test 11 $BACKLOG2 early_drop
-
- uninstall_qdisc
}
red_trap_test()
{
install_qdisc qevent early_drop block 10
+ defer uninstall_qdisc
do_drop_trap_test 10 $BACKLOG1 early_drop
do_drop_trap_test 11 $BACKLOG2 early_drop
-
- uninstall_qdisc
}
ecn_mirror_test()
{
install_qdisc ecn qevent mark block 10
+ defer uninstall_qdisc
do_mark_mirror_test 10 $BACKLOG1
do_mark_mirror_test 11 $BACKLOG2
-
- uninstall_qdisc
}
bail_on_lldpad "configure DCB" "configure Qdiscs"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
index 159108d02895..e9043771787b 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
@@ -32,45 +32,51 @@ uninstall_qdisc()
ecn_test()
{
install_qdisc ecn
+ defer uninstall_qdisc
+
do_ecn_test 10 $BACKLOG
- uninstall_qdisc
}
ecn_test_perband()
{
install_qdisc ecn
+ defer uninstall_qdisc
+
do_ecn_test_perband 10 $BACKLOG
- uninstall_qdisc
}
ecn_nodrop_test()
{
install_qdisc ecn nodrop
+ defer uninstall_qdisc
+
do_ecn_nodrop_test 10 $BACKLOG
- uninstall_qdisc
}
red_test()
{
install_qdisc
+ defer uninstall_qdisc
+
do_red_test 10 $BACKLOG
- uninstall_qdisc
}
mc_backlog_test()
{
install_qdisc
+ defer uninstall_qdisc
+
# Note that the backlog value here does not correspond to RED
# configuration, but is arbitrary.
do_mc_backlog_test 10 $BACKLOG
- uninstall_qdisc
}
red_mirror_test()
{
install_qdisc qevent early_drop block 10
+ defer uninstall_qdisc
+
do_drop_mirror_test 10 $BACKLOG
- uninstall_qdisc
}
bail_on_lldpad "configure DCB" "configure Qdiscs"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
index 83a0210e7544..bc7ea2df49fb 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_sample.sh
@@ -218,7 +218,7 @@ psample_capture_start()
psample_capture_stop()
{
- { kill %% && wait %%; } 2>/dev/null
+ kill_process %%
}
__tc_sample_rate_test()
@@ -499,7 +499,7 @@ tc_sample_md_out_tc_occ_test()
backlog=$(tc -j -p -s qdisc show dev $rp2 | jq '.[0]["backlog"]')
# Kill mausezahn.
- { kill %% && wait %%; } 2>/dev/null
+ kill_process %%
psample_capture_stop
diff --git a/tools/testing/selftests/drivers/net/netcons_basic.sh b/tools/testing/selftests/drivers/net/netcons_basic.sh
index 06021b2059b7..b175f4d966e5 100755
--- a/tools/testing/selftests/drivers/net/netcons_basic.sh
+++ b/tools/testing/selftests/drivers/net/netcons_basic.sh
@@ -20,22 +20,26 @@ SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
# Simple script to test dynamic targets in netconsole
SRCIF="" # to be populated later
-SRCIP=192.168.1.1
+SRCIP=192.0.2.1
DSTIF="" # to be populated later
-DSTIP=192.168.1.2
+DSTIP=192.0.2.2
PORT="6666"
MSG="netconsole selftest"
+USERDATA_KEY="key"
+USERDATA_VALUE="value"
TARGET=$(mktemp -u netcons_XXXXX)
DEFAULT_PRINTK_VALUES=$(cat /proc/sys/kernel/printk)
NETCONS_CONFIGFS="/sys/kernel/config/netconsole"
NETCONS_PATH="${NETCONS_CONFIGFS}"/"${TARGET}"
+KEY_PATH="${NETCONS_PATH}/userdata/${USERDATA_KEY}"
# NAMESPACE will be populated by setup_ns with a random value
NAMESPACE=""
# IDs for netdevsim
NSIM_DEV_1_ID=$((256 + RANDOM % 256))
NSIM_DEV_2_ID=$((512 + RANDOM % 256))
+NSIM_DEV_SYS_NEW="/sys/bus/netdevsim/new_device"
# Used to create and delete namespaces
source "${SCRIPTDIR}"/../../net/lib.sh
@@ -43,7 +47,6 @@ source "${SCRIPTDIR}"/../../net/net_helper.sh
# Create netdevsim interfaces
create_ifaces() {
- local NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device
echo "$NSIM_DEV_2_ID" > "$NSIM_DEV_SYS_NEW"
echo "$NSIM_DEV_1_ID" > "$NSIM_DEV_SYS_NEW"
@@ -122,6 +125,8 @@ function cleanup() {
# delete netconsole dynamic reconfiguration
echo 0 > "${NETCONS_PATH}"/enabled
+ # Remove key
+ rmdir "${KEY_PATH}"
# Remove the configfs entry
rmdir "${NETCONS_PATH}"
@@ -136,6 +141,18 @@ function cleanup() {
echo "${DEFAULT_PRINTK_VALUES}" > /proc/sys/kernel/printk
}
+function set_user_data() {
+ if [[ ! -d "${NETCONS_PATH}""/userdata" ]]
+ then
+ echo "Userdata path not available in ${NETCONS_PATH}/userdata"
+ exit "${ksft_skip}"
+ fi
+
+ mkdir -p "${KEY_PATH}"
+ VALUE_PATH="${KEY_PATH}""/value"
+ echo "${USERDATA_VALUE}" > "${VALUE_PATH}"
+}
+
function listen_port_and_save_to() {
local OUTPUT=${1}
# Just wait for 2 seconds
@@ -146,6 +163,10 @@ function listen_port_and_save_to() {
function validate_result() {
local TMPFILENAME="$1"
+ # TMPFILENAME will contain something like:
+ # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM
+ # key=value
+
# Check if the file exists
if [ ! -f "$TMPFILENAME" ]; then
echo "FAIL: File was not generated." >&2
@@ -158,6 +179,12 @@ function validate_result() {
exit "${ksft_fail}"
fi
+ if ! grep -q "${USERDATA_KEY}=${USERDATA_VALUE}" "${TMPFILENAME}"; then
+ echo "FAIL: ${USERDATA_KEY}=${USERDATA_VALUE} not found in ${TMPFILENAME}" >&2
+ cat "${TMPFILENAME}" >&2
+ exit "${ksft_fail}"
+ fi
+
# Delete the file once it is validated, otherwise keep it
# for debugging purposes
rm "${TMPFILENAME}"
@@ -185,6 +212,11 @@ function check_for_dependencies() {
exit "${ksft_skip}"
fi
+ if [ ! -f "${NSIM_DEV_SYS_NEW}" ]; then
+ echo "SKIP: file ${NSIM_DEV_SYS_NEW} does not exist. Check if CONFIG_NETDEVSIM is enabled" >&2
+ exit "${ksft_skip}"
+ fi
+
if [ ! -d "${NETCONS_CONFIGFS}" ]; then
echo "SKIP: directory ${NETCONS_CONFIGFS} does not exist. Check if NETCONSOLE_DYNAMIC is enabled" >&2
exit "${ksft_skip}"
@@ -220,6 +252,8 @@ trap cleanup EXIT
set_network
# Create a dynamic target for netconsole
create_dynamic_target
+# Configure the userdata key "key" with the value "value"
+set_user_data
# Listen for the netconsole port inside the namespace and destination interface
listen_port_and_save_to "${OUTPUT_FILE}" &
# Wait for socat to start and listen to the port.
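
The new set_user_data step exercises netconsole's userdata support: each key is a directory created under the target's userdata/ node in configfs, and the value written to the value attribute inside it is appended as a key=value line to every message, which validate_result() then greps for. At the configfs level this amounts to (with <target> standing for the randomly named target the script creates):

  mkdir /sys/kernel/config/netconsole/<target>/userdata/key
  echo value > /sys/kernel/config/netconsole/<target>/userdata/key/value
  # Captured messages now carry an extra trailing line:
  #   key=value
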
diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile
index 5bace0b7fb57..07b7c46d3311 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/Makefile
+++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile
@@ -4,11 +4,14 @@ TEST_PROGS = devlink.sh \
devlink_in_netns.sh \
devlink_trap.sh \
ethtool-coalesce.sh \
+ ethtool-features.sh \
ethtool-fec.sh \
ethtool-pause.sh \
ethtool-ring.sh \
fib.sh \
+ fib_notifications.sh \
hw_stats_l3.sh \
+ macsec-offload.sh \
nexthop.sh \
peer.sh \
psample.sh \
diff --git a/tools/testing/selftests/drivers/net/netdevsim/config b/tools/testing/selftests/drivers/net/netdevsim/config
index adf45a3a78b4..5117c78ddf0a 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/config
+++ b/tools/testing/selftests/drivers/net/netdevsim/config
@@ -1,6 +1,7 @@
CONFIG_DUMMY=y
CONFIG_GENEVE=m
CONFIG_IPV6=y
+CONFIG_MACSEC=m
CONFIG_NETDEVSIM=m
CONFIG_NET_SCH_MQPRIO=y
CONFIG_NET_SCH_MULTIQ=y
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-features.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-features.sh
new file mode 100644
index 000000000000..bc210dc6ad2d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-features.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ethtool-common.sh
+
+NSIM_NETDEV=$(make_netdev)
+
+set -o pipefail
+
+FEATS="
+ tx-checksum-ip-generic
+ tx-scatter-gather
+ tx-tcp-segmentation
+ generic-segmentation-offload
+ generic-receive-offload"
+
+for feat in $FEATS ; do
+ s=$(ethtool --json -k $NSIM_NETDEV | jq ".[].\"$feat\".active" 2>/dev/null)
+ check $? "$s" true
+
+ s=$(ethtool --json -k $NSIM_NETDEV | jq ".[].\"$feat\".fixed" 2>/dev/null)
+ check $? "$s" false
+done
+
+if [ $num_errors -eq 0 ]; then
+ echo "PASSED all $((num_passes)) checks"
+ exit 0
+else
+ echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+ exit 1
+fi
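
The checks above lean on ethtool's JSON output: with --json -k each feature is an object carrying at least "active" and "fixed" flags, and the test expects the listed offloads to be active and not fixed (i.e. toggleable) on netdevsim. A single feature can be inspected the same way by hand (the interface name is only an example):

  ethtool --json -k eni1np1 | jq '.[]."tx-tcp-segmentation"'
  # Expected, roughly: { ..., "active": true, "fixed": false }
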
diff --git a/tools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh b/tools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh
index 8d91191a098c..9896580c3d85 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh
@@ -94,7 +94,7 @@ route_addition_check()
sleep 1
$IP route add $route dev dummy1
sleep 1
- kill %% && wait %% &> /dev/null
+ kill_process %%
route_notify_check $outfile $expected_num_notifications $offload_failed
rm -f $outfile
@@ -148,7 +148,7 @@ route_deletion_check()
sleep 1
$IP route del $route dev dummy1
sleep 1
- kill %% && wait %% &> /dev/null
+ kill_process %%
route_notify_check $outfile $expected_num_notifications
rm -f $outfile
@@ -191,7 +191,7 @@ route_replacement_check()
sleep 1
$IP route replace $route dev dummy2
sleep 1
- kill %% && wait %% &> /dev/null
+ kill_process %%
route_notify_check $outfile $expected_num_notifications
rm -f $outfile
diff --git a/tools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh b/tools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh
new file mode 100755
index 000000000000..98033e6667d2
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ethtool-common.sh
+
+NSIM_NETDEV=$(make_netdev)
+MACSEC_NETDEV=macsec_nsim
+
+set -o pipefail
+
+if ! ethtool -k $NSIM_NETDEV | grep -q 'macsec-hw-offload: on'; then
+ echo "SKIP: netdevsim doesn't support MACsec offload"
+ exit 4
+fi
+
+if ! ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac 2>/dev/null; then
+ echo "SKIP: couldn't create macsec device"
+ exit 4
+fi
+ip link del $MACSEC_NETDEV
+
+#
+# test macsec offload API
+#
+
+ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}" type macsec port 4 offload mac
+check $?
+
+ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}2" type macsec address "aa:bb:cc:dd:ee:ff" port 5 offload mac
+check $?
+
+ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}3" type macsec sci abbacdde01020304 offload mac
+check $?
+
+ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}4" type macsec port 8 offload mac 2> /dev/null
+check $? '' '' 1
+
+ip macsec add "${MACSEC_NETDEV}" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
+check $?
+
+ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef"
+check $?
+
+ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on \
+ key 00 0123456789abcdef0123456789abcdef
+check $?
+
+ip macsec add "${MACSEC_NETDEV}" rx port 1235 address "1c:ed:de:ad:be:ef" 2> /dev/null
+check $? '' '' 1
+
+# can't disable macsec offload when SAs are configured
+ip link set "${MACSEC_NETDEV}" type macsec offload off 2> /dev/null
+check $? '' '' 1
+
+ip macsec offload "${MACSEC_NETDEV}" off 2> /dev/null
+check $? '' '' 1
+
+# toggle macsec offload via rtnetlink
+ip link set "${MACSEC_NETDEV}2" type macsec offload off
+check $?
+
+ip link set "${MACSEC_NETDEV}2" type macsec offload mac
+check $?
+
+# toggle macsec offload via genetlink
+ip macsec offload "${MACSEC_NETDEV}2" off
+check $?
+
+ip macsec offload "${MACSEC_NETDEV}2" mac
+check $?
+
+for dev in ${MACSEC_NETDEV}{,2,3} ; do
+ ip link del $dev
+ check $?
+done
+
+
+#
+# test ethtool features when toggling offload
+#
+
+ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac
+TMP_FEATS_ON_1="$(ethtool -k $MACSEC_NETDEV)"
+
+ip link set $MACSEC_NETDEV type macsec offload off
+TMP_FEATS_OFF_1="$(ethtool -k $MACSEC_NETDEV)"
+
+ip link set $MACSEC_NETDEV type macsec offload mac
+TMP_FEATS_ON_2="$(ethtool -k $MACSEC_NETDEV)"
+
+[ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_2" ]
+check $?
+
+ip link del $MACSEC_NETDEV
+
+ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec
+check $?
+
+TMP_FEATS_OFF_2="$(ethtool -k $MACSEC_NETDEV)"
+[ "$TMP_FEATS_OFF_1" = "$TMP_FEATS_OFF_2" ]
+check $?
+
+ip link set $MACSEC_NETDEV type macsec offload mac
+check $?
+
+TMP_FEATS_ON_3="$(ethtool -k $MACSEC_NETDEV)"
+[ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_3" ]
+check $?
+
+
+if [ $num_errors -eq 0 ]; then
+ echo "PASSED all $((num_passes)) checks"
+ exit 0
+else
+ echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+ exit 1
+fi
diff --git a/tools/testing/selftests/drivers/net/shaper.py b/tools/testing/selftests/drivers/net/shaper.py
new file mode 100755
index 000000000000..11310f19bfa0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/shaper.py
@@ -0,0 +1,461 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_true, KsftSkipEx
+from lib.py import EthtoolFamily, NetshaperFamily
+from lib.py import NetDrvEnv
+from lib.py import NlError
+from lib.py import cmd
+
+def get_shapers(cfg, nl_shaper) -> None:
+ try:
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ except NlError as e:
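+        # Errno 95 is EOPNOTSUPP: the driver does not implement the shaper API.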
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+
+ # Default configuration: no shapers configured.
+ ksft_eq(len(shapers), 0)
+
+def get_caps(cfg, nl_shaper) -> None:
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex}, dump=True)
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+
+    # Each device implementing shaper support must support some
+    # features in at least one scope.
+    ksft_true(len(caps) > 0)
+
+def set_qshapers(cfg, nl_shaper) -> None:
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'queue'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+    if 'support-bw-max' not in caps or 'support-metric-bps' not in caps:
+ raise KsftSkipEx("device does not support queue scope shapers with bw_max and metric bps")
+
+    cfg.queues = True
+ netnl = EthtoolFamily()
+ channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
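+    # Prefer combined channels; fall back to rx-only channels when none exist.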
+ if channels['combined-count'] == 0:
+ cfg.rx_type = 'rx'
+ cfg.nr_queues = channels['rx-count']
+ else:
+ cfg.rx_type = 'combined'
+ cfg.nr_queues = channels['combined-count']
+ if cfg.nr_queues < 3:
+ raise KsftSkipEx(f"device does not support enough queues min 3 found {cfg.nr_queues}")
+
+ nl_shaper.set({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+ nl_shaper.set({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 2},
+ 'metric': 'bps',
+ 'bw-max': 20000})
+
+ # Querying a specific shaper not yet configured must fail.
+ raised = False
+ try:
+        nl_shaper.get({'ifindex': cfg.ifindex,
+                       'handle': {'scope': 'queue', 'id': 0}})
+    except NlError:
+        raised = True
+ ksft_eq(raised, True)
+
+ shaper_q1 = nl_shaper.get({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+ ksft_eq(shaper_q1, {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 10000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 2},
+ 'metric': 'bps',
+ 'bw-max': 20000}])
+
+def del_qshapers(cfg, nl_shaper) -> None:
+ if not cfg.queues:
+ raise KsftSkipEx("queue shapers not supported by device, skipping delete")
+
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 2}})
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(len(shapers), 0)
+
+def set_nshapers(cfg, nl_shaper) -> None:
+ # Check required features.
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'netdev'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+    if 'support-bw-max' not in caps or 'support-metric-bps' not in caps:
+        raise KsftSkipEx("device does not support netdev scope shapers with bw_max and metric bps")
+
+    cfg.netdev = True
+ nl_shaper.set({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'netdev', 'id': 0},
+ 'bw-max': 100000})
+
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'netdev'},
+ 'metric': 'bps',
+ 'bw-max': 100000}])
+
+def del_nshapers(cfg, nl_shaper) -> None:
+ if not cfg.netdev:
+ raise KsftSkipEx("netdev shaper not supported by device, skipping delete")
+
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'netdev'}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(len(shapers), 0)
+
+def basic_groups(cfg, nl_shaper) -> None:
+ if not cfg.netdev:
+ raise KsftSkipEx("netdev shaper not supported by the device")
+ if cfg.nr_queues < 3:
+ raise KsftSkipEx(f"netdev does not have enough queues min 3 reported {cfg.nr_queues}")
+
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'queue'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+    if 'support-weight' not in caps:
+ raise KsftSkipEx("device does not support queue scope shapers with weight")
+
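+    # Group queues 1 and 2 directly under the netdev scope shaper.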
+ node_handle = nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 1},
+ {'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2}],
+ 'handle': {'scope':'netdev'},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+ ksft_eq(node_handle, {'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'netdev'}})
+
+ shaper = nl_shaper.get({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+ ksft_eq(shaper, {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 1 })
+
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 2}})
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+
+    # Deleting all the leaf shapers does not affect the node shaper
+    # when the latter has 'netdev' scope.
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(len(shapers), 1)
+
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'netdev'}})
+
+def qgroups(cfg, nl_shaper) -> None:
+ if cfg.nr_queues < 4:
+ raise KsftSkipEx(f"netdev does not have enough queues min 4 reported {cfg.nr_queues}")
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'node'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+    if 'support-bw-max' not in caps or 'support-metric-bps' not in caps:
+ raise KsftSkipEx("device does not support node scope shapers with bw_max and metric bps")
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'queue'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("shapers not supported by the device")
+ raise
+    if 'support-nesting' not in caps or 'support-weight' not in caps or 'support-metric-bps' not in caps:
+        raise KsftSkipEx("device does not support nested queue scope shapers with weight and metric bps")
+
+    cfg.groups = True
+ node_handle = nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3},
+ {'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2}],
+ 'handle': {'scope':'node'},
+ 'metric': 'bps',
+ 'bw-max': 10000})
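+    # The node id is allocated by the kernel; read it back from the returned handle.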
+ node_id = node_handle['handle']['id']
+
+ shaper = nl_shaper.get({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+ ksft_eq(shaper, {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3})
+ shaper = nl_shaper.get({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'node', 'id': node_id}})
+ ksft_eq(shaper, {'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'node', 'id': node_id},
+ 'parent': {'scope': 'netdev'},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+
+    # Grouping to an explicitly specified, non-existent node scope shaper must fail.
+ raised = False
+ try:
+ nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 3}],
+ 'handle': {'scope':'node', 'id': node_id + 1},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+
+    except NlError:
+ raised = True
+ ksft_eq(raised, True)
+
+ # Add to an existing node
+ node_handle = nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 4}],
+ 'handle': {'scope':'node', 'id': node_id}})
+ ksft_eq(node_handle, {'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'node', 'id': node_id}})
+
+ shaper = nl_shaper.get({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 3}})
+ ksft_eq(shaper, {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 4})
+
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 2}})
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 1}})
+
+    # Deleting a non-empty node moves its leaves downstream (they are
+    # re-linked to the deleted node's parent).
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'node', 'id': node_id}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 4}])
+
+ # Finish and verify the complete cleanup.
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': 3}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(len(shapers), 0)
+
+def delegation(cfg, nl_shaper) -> None:
+ if not cfg.groups:
+ raise KsftSkipEx("device does not support node scope")
+ try:
+ caps = nl_shaper.cap_get({'ifindex': cfg.ifindex,
+ 'scope':'node'})
+ except NlError as e:
+ if e.error == 95:
+ raise KsftSkipEx("node scope shapers not supported by the device")
+ raise
+ if not 'support-nesting' in caps:
+ raise KsftSkipEx("device does not support node scope shapers nesting")
+
+ node_handle = nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3},
+ {'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2},
+ {'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 1}],
+ 'handle': {'scope':'node'},
+ 'metric': 'bps',
+ 'bw-max': 10000})
+ node_id = node_handle['handle']['id']
+
+ # Create the nested node and validate the hierarchy
+ nested_node_handle = nl_shaper.group({
+ 'ifindex': cfg.ifindex,
+ 'leaves':[{'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3},
+ {'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2}],
+ 'handle': {'scope':'node'},
+ 'metric': 'bps',
+ 'bw-max': 5000})
+ nested_node_id = nested_node_handle['handle']['id']
+ ksft_true(nested_node_id != node_id)
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': nested_node_id},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': nested_node_id},
+ 'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 1},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'node', 'id': node_id},
+ 'metric': 'bps',
+ 'bw-max': 10000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'node', 'id': nested_node_id},
+ 'metric': 'bps',
+ 'bw-max': 5000}])
+
+    # Deleting a non-empty node moves its leaves downstream (they are
+    # re-linked to the deleted node's parent).
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'node', 'id': nested_node_id}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'weight': 3},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 2},
+ 'weight': 2},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'node', 'id': node_id},
+ 'handle': {'scope': 'queue', 'id': 3},
+ 'weight': 1},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'node', 'id': node_id},
+ 'metric': 'bps',
+ 'bw-max': 10000}])
+
+ # Final cleanup.
+ for i in range(1, 4):
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': i}})
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(len(shapers), 0)
+
+def queue_update(cfg, nl_shaper) -> None:
+ if cfg.nr_queues < 4:
+ raise KsftSkipEx(f"netdev does not have enough queues min 4 reported {cfg.nr_queues}")
+ if not cfg.queues:
+ raise KsftSkipEx("device does not support queue scope")
+
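+    # Configure a shaper on each of the first three queues, with increasing limits.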
+ for i in range(3):
+ nl_shaper.set({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': i},
+ 'metric': 'bps',
+ 'bw-max': (i + 1) * 1000})
+    # Delete a channel with no shaper configured on top of the related
+    # queue: no changes expected.
+ cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} 3", timeout=10)
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 0},
+ 'metric': 'bps',
+ 'bw-max': 1000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 2000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 2},
+ 'metric': 'bps',
+ 'bw-max': 3000}])
+
+    # Delete a channel with a shaper configured on top of the related
+    # queue: that shaper must be deleted, too.
+ cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} 2", timeout=10)
+
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 0},
+ 'metric': 'bps',
+ 'bw-max': 1000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 2000}])
+
+    # Restore the original channel count: the shaper list must not change.
+ cmd(f"ethtool -L {cfg.dev['ifname']} {cfg.rx_type} {cfg.nr_queues}", timeout=10)
+ shapers = nl_shaper.get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_eq(shapers, [{'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 0},
+ 'metric': 'bps',
+ 'bw-max': 1000},
+ {'ifindex': cfg.ifindex,
+ 'parent': {'scope': 'netdev'},
+ 'handle': {'scope': 'queue', 'id': 1},
+ 'metric': 'bps',
+ 'bw-max': 2000}])
+
+ # Final cleanup.
+ for i in range(0, 2):
+ nl_shaper.delete({'ifindex': cfg.ifindex,
+ 'handle': {'scope': 'queue', 'id': i}})
+
+def main() -> None:
+ with NetDrvEnv(__file__, queue_count=4) as cfg:
+ cfg.queues = False
+ cfg.netdev = False
+ cfg.groups = False
+ cfg.nr_queues = 0
+ ksft_run([get_shapers,
+ get_caps,
+ set_qshapers,
+ del_qshapers,
+ set_nshapers,
+ del_nshapers,
+ basic_groups,
+ qgroups,
+ delegation,
+ queue_update], args=(cfg, NetshaperFamily()))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()