-rw-r--r--  MAINTAINERS                                                    |   9
-rw-r--r--  Makefile                                                       |   6
-rw-r--r--  arch/loongarch/kernel/Makefile                                 |   8
-rw-r--r--  arch/loongarch/kvm/Makefile                                    |   2
-rw-r--r--  arch/riscv/kernel/Makefile                                     |   4
-rw-r--r--  crypto/scompress.c                                             |  10
-rw-r--r--  crypto/testmgr.c                                               | 147
-rw-r--r--  drivers/acpi/tables.c                                          |   2
-rw-r--r--  drivers/char/virtio_console.c                                  |   7
-rw-r--r--  drivers/crypto/atmel-sha204a.c                                 |   6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c                           |   9
-rw-r--r--  drivers/net/dsa/mt7530.c                                       |   6
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c                     |  36
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c                     |   3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c                       |   9
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h                       |   4
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c                    |   4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c                   |  45
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c                    |  24
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h                    |  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c           |  26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h                |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c           |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c          |   2
-rw-r--r--  drivers/net/phy/dp83822.c                                      |   2
-rw-r--r--  drivers/net/phy/microchip.c                                    |  46
-rw-r--r--  drivers/net/phy/phy_led_triggers.c                             |  23
-rw-r--r--  drivers/net/phy/phylink.c                                      |  38
-rw-r--r--  drivers/net/virtio_net.c                                       |  69
-rw-r--r--  drivers/net/xen-netfront.c                                     |  17
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c                                |   8
-rw-r--r--  drivers/scsi/scsi_lib.c                                        |   6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c                            |   2
-rw-r--r--  drivers/ufs/core/ufs-mcq.c                                     |  12
-rw-r--r--  drivers/ufs/core/ufshcd.c                                      |  31
-rw-r--r--  drivers/ufs/host/ufs-qcom.c                                    |  43
-rw-r--r--  drivers/ufs/host/ufs-qcom.h                                    |  18
-rw-r--r--  drivers/vhost/scsi.c                                           |  74
-rw-r--r--  drivers/virtio/virtio.c                                        |   6
-rw-r--r--  drivers/virtio/virtio_pci_modern.c                             |   4
-rw-r--r--  drivers/virtio/virtio_ring.c                                   |   2
-rw-r--r--  fs/btrfs/file.c                                                |   9
-rw-r--r--  fs/btrfs/relocation.c                                          |   2
-rw-r--r--  fs/btrfs/subpage.c                                             |   4
-rw-r--r--  fs/btrfs/tree-checker.c                                        |   2
-rw-r--r--  fs/btrfs/zoned.c                                               |  19
-rw-r--r--  include/linux/blkdev.h                                         |  67
-rw-r--r--  include/linux/local_lock_internal.h                            |   8
-rw-r--r--  include/linux/phylink.h                                        |  31
-rw-r--r--  include/linux/virtio.h                                         |   3
-rw-r--r--  include/uapi/linux/landlock.h                                  |  87
-rw-r--r--  include/uapi/linux/vhost.h                                     |   4
-rw-r--r--  include/uapi/linux/virtio_pci.h                                |   1
-rw-r--r--  include/ufs/ufs_quirks.h                                       |   6
-rw-r--r--  kernel/cgroup/cgroup.c                                         |  31
-rw-r--r--  kernel/sched/ext.c                                             |  50
-rw-r--r--  kernel/vhost_task.c                                            |   2
-rw-r--r--  net/core/lwtunnel.c                                            |  26
-rw-r--r--  net/core/netdev-genl.c                                         |   9
-rw-r--r--  net/core/selftests.c                                           |  18
-rw-r--r--  net/mptcp/pm_userspace.c                                       |   6
-rw-r--r--  net/sched/sch_hfsc.c                                           |  23
-rw-r--r--  net/tipc/monitor.c                                             |   3
-rw-r--r--  scripts/Makefile.extrawarn                                     |   2
-rw-r--r--  security/integrity/ima/ima_main.c                              |   4
-rw-r--r--  security/landlock/domain.c                                     |   4
-rw-r--r--  security/landlock/domain.h                                     |   2
-rw-r--r--  security/landlock/syscalls.c                                   |  27
-rw-r--r--  tools/sched_ext/scx_flatcg.bpf.c                               |   2
-rw-r--r--  tools/testing/selftests/landlock/audit.h                       |  21
-rw-r--r--  tools/testing/selftests/landlock/audit_test.c                  | 154
-rw-r--r--  tools/testing/selftests/landlock/fs_test.c                     |   3
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh                      |   5
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json  |  39
74 files changed, 975 insertions, 485 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index fa1e04e87d1d..f31aeb6b452e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3191,6 +3191,12 @@ M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: drivers/clk/socfpga/
+ARM/SOCFPGA DWMAC GLUE LAYER
+M: Maxime Chevallier <maxime.chevallier@bootlin.com>
+S: Maintained
+F: Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+
ARM/SOCFPGA EDAC BINDINGS
M: Matthew Gerlach <matthew.gerlach@altera.com>
S: Maintained
@@ -16812,6 +16818,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
F: drivers/connector/
F: drivers/net/
F: drivers/ptp/
+F: drivers/s390/net/
F: include/dt-bindings/net/
F: include/linux/cn_proc.h
F: include/linux/etherdevice.h
@@ -16821,6 +16828,7 @@ F: include/linux/fddidevice.h
F: include/linux/hippidevice.h
F: include/linux/if_*
F: include/linux/inetdevice.h
+F: include/linux/ism.h
F: include/linux/netdev*
F: include/linux/platform_data/wiznet.h
F: include/uapi/linux/cn_proc.h
@@ -21312,6 +21320,7 @@ L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
F: drivers/s390/net/
+F: include/linux/ism.h
S390 PCI SUBSYSTEM
M: Niklas Schnelle <schnelle@linux.ibm.com>
diff --git a/Makefile b/Makefile
index 3dcad2319662..07f818186151 100644
--- a/Makefile
+++ b/Makefile
@@ -1053,11 +1053,11 @@ NOSTDINC_FLAGS += -nostdinc
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
#Currently, disable -Wstringop-overflow for GCC 11, globally.
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
-#Currently, disable -Wunterminated-string-initialization as an error
-KBUILD_CFLAGS += $(call cc-option, -Wno-error=unterminated-string-initialization)
+#Currently, disable -Wunterminated-string-initialization as broken
+KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 4853e8b04c6f..f9dcaa60033d 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index f4c8e35c216a..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 8d186bfced45..f7480c9c6f8d 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
-CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
+CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 5762fcc63b51..36934c78d127 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -215,8 +215,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
spage = nth_page(spage, soff / PAGE_SIZE);
soff = offset_in_page(soff);
- n = slen / PAGE_SIZE;
- n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
if (PageHighMem(nth_page(spage, n)) &&
size_add(soff, slen) > PAGE_SIZE)
break;
@@ -243,9 +243,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dpage = nth_page(dpage, doff / PAGE_SIZE);
doff = offset_in_page(doff);
- n = dlen / PAGE_SIZE;
- n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
- if (PageHighMem(dpage + n) &&
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(nth_page(dpage, n)) &&
size_add(doff, dlen) > PAGE_SIZE)
break;
dst = kmap_local_page(dpage) + doff;
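Note on the scompress hunks above: the patched expression computes the index of the page holding the last byte of a buffer that starts soff bytes into its first page, which the old expression misstated for page-aligned lengths. A minimal standalone sketch of the arithmetic, assuming soff < PAGE_SIZE and len > 0 (PAGE_SZ, OFFSET_IN_PAGE, and last_page_index are illustrative stand-ins, not kernel API):

#include <assert.h>
#include <stddef.h>

#define PAGE_SZ 4096UL
#define OFFSET_IN_PAGE(x) ((x) % PAGE_SZ)	/* kernel: offset_in_page() */

static size_t last_page_index(size_t soff, size_t len)
{
	size_t n = (len - 1) / PAGE_SZ;	/* whole pages before the last byte */

	/* carry one more page if soff pushes the last byte over a boundary */
	n += (OFFSET_IN_PAGE(len - 1) + soff) / PAGE_SZ;
	return n;			/* equals (soff + len - 1) / PAGE_SZ */
}

int main(void)
{
	assert(last_page_index(0, 4096) == 0);	/* last byte still in page 0 */
	assert(last_page_index(1, 4096) == 1);	/* offset pushes it into page 1 */
	return 0;
}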
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd609d4c8ef..82977ea25db3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -58,9 +58,6 @@ module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
#endif
-/* Multibuffer is unlimited. Set arbitrary limit for testing. */
-#define MAX_MB_MSGS 16
-
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/* a perfect nop */
@@ -3329,48 +3326,27 @@ static int test_acomp(struct crypto_acomp *tfm,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
- struct scatterlist *src = NULL, *dst = NULL;
- struct acomp_req *reqs[MAX_MB_MSGS] = {};
- char *decomp_out[MAX_MB_MSGS] = {};
- char *output[MAX_MB_MSGS] = {};
- struct crypto_wait wait;
- struct acomp_req *req;
- int ret = -ENOMEM;
unsigned int i;
+ char *output, *decomp_out;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
- src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL);
- if (!src)
- goto out;
- dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL);
- if (!dst)
- goto out;
-
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = acomp_request_alloc(tfm);
- if (!reqs[i])
- goto out;
-
- acomp_request_set_callback(reqs[i],
- CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
- if (i)
- acomp_request_chain(reqs[i], reqs[0]);
-
- output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output[i])
- goto out;
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_out[i])
- goto out;
+ decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!decomp_out) {
+ kfree(output);
+ return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
void *input_vec;
- int j;
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
@@ -3378,61 +3354,70 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(dst + j, output[j], dlen);
- acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen);
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
}
- req = reqs[0];
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
+ acomp_request_free(req);
goto out;
}
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
+ sg_init_one(&src, output, ilen);
+ sg_init_one(&dst, decomp_out, dlen);
crypto_init_wait(&wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(src + j, output[j], ilen);
- sg_init_one(dst + j, decomp_out[j], dlen);
- acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen);
- }
-
- crypto_wait_req(crypto_acomp_decompress(req), &wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- ret = reqs[j]->base.err;
- if (ret) {
- pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n",
- i + 1, j, algo, -ret);
- kfree(input_vec);
- goto out;
- }
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
- if (reqs[j]->dlen != ctemplate[i].inlen) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n",
- i + 1, j, algo, reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
- if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s\n",
- i + 1, j, algo);
- hexdump(output[j], reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ if (req->dlen != ctemplate[i].inlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(input_vec, decomp_out, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
}
kfree(input_vec);
+ acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
@@ -3446,9 +3431,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
- sg_init_one(dst, output[0], dlen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
@@ -3459,7 +3445,7 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- acomp_request_set_params(req, src, dst, ilen, dlen);
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -3481,10 +3467,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- if (memcmp(output[0], dtemplate[i].output, req->dlen)) {
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
pr_err("alg: acomp: Decompression test %d failed for %s\n",
i + 1, algo);
- hexdump(output[0], req->dlen);
+ hexdump(output, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
@@ -3498,13 +3484,8 @@ static int test_acomp(struct crypto_acomp *tfm,
ret = 0;
out:
- acomp_request_free(reqs[0]);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- kfree(output[i]);
- kfree(decomp_out[i]);
- }
- kfree(dst);
- kfree(src);
+ kfree(decomp_out);
+ kfree(output);
return ret;
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index b5205d464a8a..2295abbecd14 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -396,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst __nonstring = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5f04951d0dd4..088182e54deb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1576,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 cols;
+ __virtio16 rows;
} size;
if (!is_console_port(port))
@@ -1585,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 75bebec2c757..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 2d88e390feb4..e32e680c7197 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -128,6 +128,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ /*
+ * drm does its own synchronization on shutdown.
+ * Do nothing here, opt out of device reset.
+ */
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -162,6 +170,7 @@ static struct virtio_driver virtio_gpu_driver = {
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index d70399bce5b9..c5d6628d7980 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2419,6 +2419,9 @@ mt7531_setup_common(struct dsa_switch *ds)
struct mt7530_priv *priv = ds->priv;
int ret, i;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2571,9 +2574,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
return 0;
}
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..506f682d15c1 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
unsigned long poll_interval = 1;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
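The adminq rework above replaces an on-stack wait context, shared with the IRQ path through a pointer, with a struct completion embedded in each descriptor slot, so a response arriving after a timeout lands in still-valid memory. A sketch of the pattern using real kernel completion APIs, with the slot layout and helper names made up for illustration:

#include <linux/completion.h>
#include <linux/string.h>

struct slot {
	struct completion done;	/* init_completion() once at ring setup */
	void *dest;		/* caller's result buffer */
};

/* Poster: re-arm the slot before handing the descriptor to the device. */
static void slot_post(struct slot *s, void *dest)
{
	s->dest = dest;
	reinit_completion(&s->done);
}

/* IRQ side: deliver only if no-one has already given up on this slot. */
static void slot_complete(struct slot *s, const void *comp, size_t len)
{
	if (!completion_done(&s->done)) {
		memcpy(s->dest, comp, len);
		complete(&s->done);
	}
}

On timeout, the waiter itself marks the slot done (the complete(wc) added above), so a late response is discarded by the completion_done() check instead of being copied into a buffer the caller has abandoned.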
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index eeb72b1809ea..c9aac27883a3 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 1eb0d92786f7..9512aa4083f0 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 0bf320c43083..0b53a1fab46d 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index c5c787df61a4..d8dc39da4161 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -105,7 +105,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -118,8 +118,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2106861463e4..3ee52f4b1166 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1850,6 +1850,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+}
+
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1868,11 +1878,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1887,7 +1896,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1915,15 +1923,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
napi_gro_receive(napi, skb);
break;
@@ -1965,11 +1979,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -3362,7 +3372,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
bdr->buffer_offset = ENETC_RXB_PAD;
priv->rx_ring[i] = bdr;
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
if (err)
goto free_vector;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index bdb98c9d8b1c..47807b202310 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4043,11 +4043,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -4064,7 +4080,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 39709649ea8d..88ef2e9c50fc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -151,7 +151,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo but actually called like that also in
+ * MediaTek's datasheet
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index eb3bd9c7f66e..ca9ecec358b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -637,10 +637,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -654,7 +650,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
@@ -710,10 +715,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -727,7 +728,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&ttc_groups[TTC_GROUPS_DEFAULT];
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 967a16212faf..0c011a47d5a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -320,8 +320,8 @@ enum rtc_control {
/* PTP and timestamping registers */
-#define GMAC3_X_ATSNS GENMASK(19, 16)
-#define GMAC3_X_ATSNS_SHIFT 16
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index a8b901cdf5cb..56b76aaa58f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -553,7 +553,7 @@ void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + GMAC_PTP_ATNR);
- ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 0f59aa982604..e2840fa241f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -222,7 +222,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
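Both get_ptptime() fixes above address the same C pitfall: readl() returns a 32-bit value, so seconds * NSEC_PER_SEC is evaluated in 32-bit arithmetic and wraps for any seconds count of 5 or more before being widened to u64. A standalone demonstration of why the cast matters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sec = 5;	/* e.g. seconds read from the ATSR register */

	/* 32-bit multiply wraps: 5e9 mod 2^32 = 705032704 */
	uint64_t wrong = sec * 1000000000u;
	/* widen first, multiply in 64 bits: 5000000000 */
	uint64_t right = (uint64_t)sec * 1000000000u;

	printf("%llu vs %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}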
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 14f361549638..e32013eb0186 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -730,7 +730,7 @@ static int dp83822_phy_reset(struct phy_device *phydev)
return phydev->drv->config_init(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static const u32 tx_amplitude_100base_tx_gain[] = {
80, 82, 83, 85, 87, 88, 90, 92,
93, 95, 97, 98, 100, 102, 103, 105,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0e17cc458efd..93de88c1c8fd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
}
-static int lan88xx_phy_config_intr(struct phy_device *phydev)
-{
- int rc;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
- rc = phy_read(phydev, LAN88XX_INT_STS);
- rc = phy_write(phydev, LAN88XX_INT_MASK,
- LAN88XX_INT_MASK_MDINTPIN_EN_ |
- LAN88XX_INT_MASK_LINK_CHANGE_);
- } else {
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
- if (rc)
- return rc;
-
- /* Ack interrupts after they have been disabled */
- rc = phy_read(phydev, LAN88XX_INT_STS);
- }
-
- return rc < 0 ? rc : 0;
-}
-
-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status;
-
- irq_status = phy_read(phydev, LAN88XX_INT_STS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
@@ -528,8 +487,9 @@ static struct phy_driver microchip_phy_driver[] = {
.config_aneg = lan88xx_config_aneg,
.link_change_notify = lan88xx_link_change_notify,
- .config_intr = lan88xx_phy_config_intr,
- .handle_interrupt = lan88xx_handle_interrupt,
+ /* Interrupt handling is broken, do not define related
+ * functions to force polling.
+ */
.suspend = lan88xx_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index bd3c9554f6ac..60893691d4c3 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -93,9 +93,8 @@ int phy_led_triggers_register(struct phy_device *phy)
if (!phy->phy_num_led_triggers)
return 0;
- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
- sizeof(*phy->led_link_trigger),
- GFP_KERNEL);
+ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
+ GFP_KERNEL);
if (!phy->led_link_trigger) {
err = -ENOMEM;
goto out_clear;
@@ -105,10 +104,9 @@ int phy_led_triggers_register(struct phy_device *phy)
if (err)
goto out_free_link;
- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
- phy->phy_num_led_triggers,
- sizeof(struct phy_led_trigger),
- GFP_KERNEL);
+ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
+ sizeof(struct phy_led_trigger),
+ GFP_KERNEL);
if (!phy->phy_led_triggers) {
err = -ENOMEM;
goto out_unreg_link;
@@ -129,11 +127,11 @@ int phy_led_triggers_register(struct phy_device *phy)
out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+ kfree(phy->phy_led_triggers);
out_unreg_link:
phy_led_trigger_unregister(phy->led_link_trigger);
out_free_link:
- devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
phy->led_link_trigger = NULL;
out_clear:
phy->phy_num_led_triggers = 0;
@@ -147,8 +145,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+ kfree(phy->phy_led_triggers);
+ phy->phy_led_triggers = NULL;
- if (phy->led_link_trigger)
+ if (phy->led_link_trigger) {
phy_led_trigger_unregister(phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
+ }
}
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b68369e2342b..1bdd5d8bb5b0 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -81,6 +81,7 @@ struct phylink {
unsigned int pcs_state;
bool link_failed;
+ bool suspend_link_up;
bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
@@ -2545,14 +2546,16 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
/* Stop the resolver bringing the link up */
__set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
- /* Disable the carrier, to prevent transmit timeouts,
- * but one would hope all packets have been sent. This
- * also means phylink_resolve() will do nothing.
- */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- else
+ pl->suspend_link_up = phylink_link_is_up(pl);
+ if (pl->suspend_link_up) {
+ /* Disable the carrier, to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
pl->old_link_state = false;
+ }
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -2603,15 +2606,18 @@ void phylink_resume(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
/* Wake-on-Lan enabled, MAC handling */
- /* Call mac_link_down() so we keep the overall state balanced.
- * Do this under the state_mutex lock for consistency. This
- * will cause a "Link Down" message to be printed during
- * resume, which is harmless - the true link state will be
- * printed when we run a resolve.
- */
- mutex_lock(&pl->state_mutex);
- phylink_link_down(pl);
- mutex_unlock(&pl->state_mutex);
+ if (pl->suspend_link_up) {
+ /* Call mac_link_down() so we keep the overall state
+ * balanced. Do this under the state_mutex lock for
+ * consistency. This will cause a "Link Down" message
+ * to be printed during resume, which is harmless -
+ * the true link state will be printed when we run a
+ * resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+ }
/* Re-apply the link parameters so that all the settings get
* restored to the MAC.
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7e4617216a4b..848fab51dfa1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3342,7 +3342,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void __virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
@@ -3352,17 +3353,63 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_pause_all(struct virtnet_info *vi)
+{
+ int i;
+
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ __virtnet_rx_pause(vi, &vi->rq[i]);
+}
+
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ __virtnet_rx_pause(vi, rq);
+}
+
+static void __virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
bool running = netif_running(vi->dev);
- if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
if (running)
virtnet_napi_enable(rq);
}
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
+{
+ int i;
+
+ enable_delayed_refill(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ __virtnet_rx_resume(vi, &vi->rq[i], true);
+ else
+ __virtnet_rx_resume(vi, &vi->rq[i], false);
+ }
+}
+
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ enable_delayed_refill(vi);
+ __virtnet_rx_resume(vi, rq, true);
+}
+
static int virtnet_rx_resize(struct virtnet_info *vi,
struct receive_queue *rq, u32 ring_num)
{
@@ -5959,12 +6006,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (prog)
bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ virtnet_rx_pause_all(vi);
+
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_disable(&vi->rq[i]);
+ for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_tx_disable(&vi->sq[i]);
- }
}
if (!prog) {
@@ -5996,13 +6043,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_enabled = false;
}
+ virtnet_rx_resume_all(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev)) {
- virtnet_napi_enable(&vi->rq[i]);
+ if (netif_running(dev))
virtnet_napi_tx_enable(&vi->sq[i]);
- }
}
return 0;
@@ -6014,11 +6060,10 @@ err:
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
+ virtnet_rx_resume_all(vi);
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(&vi->rq[i]);
+ for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_tx_enable(&vi->sq[i]);
- }
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index fc52d5c4c69b..5091e1fa4a0d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_TX:
- get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ }
+ get_page(pdata);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
- if (unlikely(!err))
+ if (unlikely(err <= 0)) {
+ if (err < 0)
+ trace_xdp_exception(queue->info->netdev, prog, act);
xdp_return_frame_rx_napi(xdpf);
- else if (unlikely(err < 0))
- trace_xdp_exception(queue->info->netdev, prog, act);
+ }
break;
case XDP_REDIRECT:
get_page(pdata);
err = xdp_do_redirect(queue->info->netdev, xdp, prog);
*need_xdp_flush = true;
- if (unlikely(err))
+ if (unlikely(err)) {
trace_xdp_exception(queue->info->netdev, prog, act);
+ xdp_return_buff(xdp);
+ }
break;
case XDP_PASS:
case XDP_DROP:
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 3fcb1ad3b070..1d7901a8f0e4 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
char *desc = NULL;
u16 event;
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
+ return;
+
event = event_reply->event;
switch (event) {
@@ -451,6 +454,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
return 0;
}
+ atomic_set(&mrioc->admin_pend_isr, 0);
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -565,7 +569,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
- atomic_dec(&op_reply_q->pend_ios);
+
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -2925,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
mrioc->admin_reply_ci = 0;
mrioc->admin_reply_ephase = 1;
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (!mrioc->admin_req_base) {
mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -4653,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
if (mrioc->admin_reply_base)
memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (mrioc->init_cmds.reply) {
memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0d29470e86b0..1b43013d72c0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1253,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
*/
static void scsi_cleanup_rq(struct request *rq)
{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->flags = 0;
+
if (rq->rq_flags & RQF_DONTPREP) {
- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ scsi_mq_uninit_cmd(cmd);
rq->rq_flags &= ~RQF_DONTPREP;
}
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 1244ef3aa86c..620ba6e0ab07 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4263,8 +4263,8 @@ int iscsit_close_connection(
spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
- iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+ iscsit_stop_nopin_response_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 44156041d88f..5cb6132b8147 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -5677,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -8470,6 +8473,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8480,6 +8508,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 1b37449fbffc..c0761ccc1381 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -33,6 +33,10 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB 0x04
+#define NO_DEEMPHASIS 0x0
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -795,6 +799,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
}
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+ u32 equalizer_val;
+ int ret, i;
+
+ /* Determine the equalizer value based on the gear */
+ equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+ for (i = 0; i < tx_lanes; i++) {
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+ equalizer_val);
+ if (ret)
+ dev_err(hba->dev, "%s: failed equalizer lane %d\n",
+ __func__, i);
+ }
+}
+
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
const struct ufs_pa_layer_attr *dev_max_params,
@@ -846,6 +867,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+ ufs_qcom_set_tx_hs_equalizer(hba,
+ dev_req_params->gear_tx, dev_req_params->lane_tx);
+
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, false)) {
@@ -893,6 +919,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
(pa_vs_config_reg1 | (1 << 12)));
}
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+ PA_TX_HSG1_SYNC_LENGTH_VAL);
+ if (err)
+ dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
@@ -900,6 +936,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+ ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
return err;
}
@@ -914,6 +953,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+ .model = UFS_ANY_MODEL,
+ .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+ UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
{}
};
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index d0e6ec9128e7..05d4cb569c50 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -122,8 +122,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH 0x1552
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
+#define TX_HS_EQUALIZER 0x0037
+
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
@@ -141,6 +144,21 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL 0x4A
+
+/*
+ * Some ufs device vendors need a different TSync length.
+ * Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH BIT(16)
+
+/*
+ * Some ufs device vendors need a different Deemphasis setting.
+ * Enable this quirk to tune TX Deemphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING BIT(17)
+
/* ICE allocator type to share AES engines among TX stream and RX stream */
#define ICE_ALLOCATOR_TYPE 2
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f6f5a7ac7894..26bcf3a7f70c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -627,6 +627,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
int ret;
llnode = llist_del_all(&svq->completion_list);
+
+ mutex_lock(&svq->vq.mutex);
+
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -660,6 +663,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_scsi_release_cmd_res(se_cmd);
}
+ mutex_unlock(&svq->vq.mutex);
+
if (signal)
vhost_signal(&svq->vs->dev, &svq->vq);
}
@@ -994,39 +999,66 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
static void
vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
- int head, unsigned int out, u8 status)
+ struct vhost_scsi_ctx *vc, u8 status)
{
- struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
+ struct iov_iter iov_iter;
int ret;
memset(&rsp, 0, sizeof(rsp));
rsp.status = status;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+#define TYPE_IO_CMD 0
+#define TYPE_CTRL_TMF 1
+#define TYPE_CTRL_AN 2
+
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
- int head, unsigned out)
+ struct vhost_scsi_ctx *vc, int type)
{
- struct virtio_scsi_cmd_resp __user *resp;
- struct virtio_scsi_cmd_resp rsp;
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ } rsp;
+ struct iov_iter iov_iter;
+ size_t rsp_size;
int ret;
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+
+ if (type == TYPE_IO_CMD) {
+ rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else if (type == TYPE_CTRL_TMF) {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ rsp_size);
+
+ ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
+
+ if (likely(ret == rsp_size))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
- pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ pr_err("Faulted on virtio scsi type=%d\n", type);
}
static int
@@ -1390,9 +1422,9 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
else if (ret == -ENOMEM)
- vhost_scsi_send_status(vs, vq, vc.head, vc.out,
+ vhost_scsi_send_status(vs, vq, &vc,
SAM_STAT_TASK_SET_FULL);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
@@ -1432,8 +1464,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
else
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ mutex_lock(&tmf->svq->vq.mutex);
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
+ mutex_unlock(&tmf->svq->vq.mutex);
+
vhost_scsi_release_tmf_res(tmf);
}
@@ -1623,7 +1658,10 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc,
+ v_req.type == VIRTIO_SCSI_T_TMF ?
+ TYPE_CTRL_TMF :
+ TYPE_CTRL_AN);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
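
The conversion above replaces direct __copy_to_user() on iov_base with an
iov_iter, so a response may span several guest-supplied iovecs. A minimal
sketch of the pattern, assuming vc->out and vc->in describe the descriptor
as in the code above:

static int example_send_resp(struct vhost_virtqueue *vq,
			     struct vhost_scsi_ctx *vc,
			     const void *rsp, size_t rsp_size)
{
	struct iov_iter iter;

	/* Destination iterator over the guest-supplied response iovecs. */
	iov_iter_init(&iter, ITER_DEST, &vq->iov[vc->out], vc->in, rsp_size);

	/* copy_to_iter() returns the number of bytes actually copied. */
	if (copy_to_iter(rsp, rsp_size, &iter) != rsp_size)
		return -EFAULT;
	return 0;
}
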
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 150753c3b578..95d5d7993e5b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -407,6 +407,12 @@ static void virtio_dev_shutdown(struct device *_d)
if (!drv)
return;
+ /* If the driver has its own shutdown method, use that. */
+ if (drv->shutdown) {
+ drv->shutdown(dev);
+ return;
+ }
+
/*
* Some devices get wedged if you kick them after they are
* reset. Mark all vqs as broken to make sure we don't.
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5eaade757860..d50fe030d825 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -247,7 +247,7 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
sg_init_one(&data_sg, get_data, sizeof(*get_data));
sg_init_one(&result_sg, result, sizeof(*result));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.data_sg = &data_sg;
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
@@ -305,7 +305,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
sg_init_one(&result_sg, data, sizeof(*data));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fdd2d2b07b5a..b784aab66867 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2650,7 +2650,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered)
- vq->event_triggered = false;
+ data_race(vq->event_triggered = false);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
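
data_race() (from <linux/compiler.h>) only annotates the access for KCSAN:
it marks the plain access as an intentional, benign race and suppresses the
report, adding no atomicity or ordering. A minimal sketch:

static void example_count_hit(void)
{
	static int hits;	/* approximate statistics counter */

	/* Benign race: concurrent increments may be lost by design. */
	data_race(hits++);
}
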
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 262a707d8990..71b8a825c447 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2104,15 +2104,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* will always return true.
* So here we need to do extra page alignment for
* filemap_range_has_page().
+ *
+ * And do not decrease page_lockend right now, as it can be 0.
*/
const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+ const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
while (1) {
truncate_pagecache_range(inode, lockstart, lockend);
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
cached_state);
+ /* The same page or adjacent pages. */
+ if (page_lockend <= page_lockstart)
+ break;
/*
* We can't have ordered extents in the range, nor dirty/writeback
* pages, because we have locked the inode's VFS lock in exclusive
@@ -2124,7 +2129,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* we do, unlock the range and retry.
*/
if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
- page_lockend))
+ page_lockend - 1))
break;
unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f948f4f6431c..e17bcb034595 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3803,7 +3803,7 @@ out:
if (ret) {
if (inode)
iput(&inode->vfs_inode);
- inode = ERR_PTR(ret);
+ return ERR_PTR(ret);
}
return &inode->vfs_inode;
}
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 11dbd7be6a3b..c0a0b8b063d0 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -204,7 +204,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
btrfs_blocks_per_folio(fs_info, folio); \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
- __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
+ __start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
__start_bit += blocks_per_folio * btrfs_bitmap_nr_##name; \
__start_bit; \
})
@@ -666,7 +666,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
btrfs_blocks_per_folio(fs_info, folio); \
const struct btrfs_subpage *subpage = folio_get_private(folio); \
\
- ASSERT(blocks_per_folio < BITS_PER_LONG); \
+ ASSERT(blocks_per_folio <= BITS_PER_LONG); \
*dst = bitmap_read(subpage->bitmaps, \
blocks_per_folio * btrfs_bitmap_nr_##name, \
blocks_per_folio); \
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 43979891f7c8..2b66a6130269 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -2235,7 +2235,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
eb->start, check->level, found_level);
- return -EIO;
+ return -EUCLEAN;
}
if (!check->has_first_key)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index fb8b8b29c169..4a3e02b49f29 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1277,7 +1277,7 @@ struct zone_info {
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
struct zone_info *info, unsigned long *active,
- struct btrfs_chunk_map *map)
+ struct btrfs_chunk_map *map, bool new)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
struct btrfs_device *device;
@@ -1307,6 +1307,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
return 0;
}
+ ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
/* This zone will be used for allocation, so mark this zone non-empty. */
btrfs_dev_clear_zone_empty(device, info->physical);
@@ -1319,6 +1321,18 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
* to determine the allocation offset within the zone.
*/
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+ if (new) {
+ sector_t capacity;
+
+ capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = 0;
+ info->capacity = capacity << SECTOR_SHIFT;
+
+ return 0;
+ }
+
nofs_flag = memalloc_nofs_save();
ret = btrfs_get_dev_zone(device, info->physical, &zone);
memalloc_nofs_restore(nofs_flag);
@@ -1588,7 +1602,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
for (i = 0; i < map->num_stripes; i++) {
- ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
if (ret)
goto out;
@@ -1659,7 +1673,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
* stripe.
*/
cache->alloc_offset = cache->zone_capacity;
- ret = 0;
}
out:
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 678dc38442bf..2b8875a82ff8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -712,23 +712,6 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
(q->limits.features & BLK_FEAT_ZONED);
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return disk->nr_zones;
-}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
- return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
if (!blk_queue_is_zoned(disk->queue))
@@ -736,11 +719,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return disk_nr_zones(bdev->bd_disk);
-}
-
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return bdev->bd_disk->queue->limits.max_open_zones;
@@ -847,6 +825,51 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
(sb->s_blocksize_bits - SECTOR_SHIFT);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the zone capacity of zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the querying zone
+ *
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
+{
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
+}
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
+{
+ return disk_zone_capacity(bdev->bd_disk, pos);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
+
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void put_disk(struct gendisk *disk);
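
A minimal sketch of how a zoned filesystem might use the new helper,
mirroring the btrfs/zoned.c change earlier in this diff; the function
itself is hypothetical:

static u64 example_zone_cap_bytes(struct block_device *bdev, u64 byte_pos)
{
	/* Any sector inside the zone selects that zone's capacity. */
	return (u64)bdev_zone_capacity(bdev, byte_pos >> SECTOR_SHIFT)
			<< SECTOR_SHIFT;
}
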
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index bf2bf40d7b18..8d5ac16a9b17 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -102,11 +102,11 @@ do { \
l = (local_lock_t *)this_cpu_ptr(lock); \
tl = (local_trylock_t *)l; \
_Generic((lock), \
- local_trylock_t *: ({ \
+ __percpu local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 0); \
WRITE_ONCE(tl->acquired, 1); \
}), \
- default:(void)0); \
+ __percpu local_lock_t *: (void)0); \
local_lock_acquire(l); \
} while (0)
@@ -171,11 +171,11 @@ do { \
tl = (local_trylock_t *)l; \
local_lock_release(l); \
_Generic((lock), \
- local_trylock_t *: ({ \
+ __percpu local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 1); \
WRITE_ONCE(tl->acquired, 0); \
}), \
- default:(void)0); \
+ __percpu local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
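
The change above replaces the permissive default: arm with an explicit
__percpu local_lock_t * arm, so passing any other pointer type now fails at
compile time instead of silently doing nothing. A small sketch of that
_Generic property (the macro is illustrative, not part of this change):

/* With no default arm, an unlisted argument type is a build error. */
#define lock_kind(p)					\
	_Generic((p),					\
		local_trylock_t *: "trylock",		\
		local_lock_t *: "lock")
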
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 1f5773ab5660..30659b615fca 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -361,23 +361,29 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
/**
- * mac_link_down() - take the link down
+ * mac_link_down() - notification that the link has gone down
* @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
*
- * If @mode is not an in-band negotiation mode (as defined by
- * phylink_autoneg_inband()), force the link down and disable any
- * Energy Efficient Ethernet MAC configuration. Interface type
- * selection must be done in mac_config().
+ * Notifies the MAC that the link has gone down. This will not be called
+ * unless mac_link_up() has been previously called.
+ *
+ * The MAC should stop processing packets for transmission and reception.
+ * phylink will have called netif_carrier_off() to notify the networking
+ * stack that the link has gone down, so MAC drivers should not make this
+ * call.
+ *
+ * If @mode is %MLO_AN_INBAND, then this function must not prevent the
+ * link coming up.
*/
void mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
/**
- * mac_link_up() - allow the link to come up
+ * mac_link_up() - notification that the link has come up
* @config: a pointer to a &struct phylink_config.
- * @phy: any attached phy
+ * @phy: any attached phy (deprecated - please use LPI interfaces)
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
@@ -385,7 +391,10 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* @tx_pause: link transmit pause enablement status
* @rx_pause: link receive pause enablement status
*
- * Configure the MAC for an established link.
+ * Notifies the MAC that the link has come up, and the parameters of the
+ * link as seen from the MACs point of view. If mac_link_up() has been
+ * called previously, there will be an intervening call to mac_link_down()
+ * before this method will be subsequently called.
*
* @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
* settings, and should be used to configure the MAC block appropriately
@@ -397,9 +406,9 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* that the user wishes to override the pause settings, and this should
* be allowed when considering the implementation of this method.
*
- * If in-band negotiation mode is disabled, allow the link to come up. If
- * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
- * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Once configured, the MAC may begin to process packets for transmission
+ * and reception.
+ *
* Interface type selection must be done in mac_config().
*/
void mac_link_up(struct phylink_config *config, struct phy_device *phy,
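
A minimal sketch of a MAC driver honouring the documented semantics:
mac_link_down() only quiesces the datapath (phylink already handles the
carrier), and mac_link_up() programs the finalised link parameters before
enabling it. All example_* names are hypothetical:

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct example_priv *priv = example_config_to_priv(config);

	/* Stop TX/RX only; phylink already called netif_carrier_off(). */
	example_stop_datapath(priv);
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct example_priv *priv = example_config_to_priv(config);

	/* Program the finalised link settings, then start the datapath. */
	example_set_link(priv, speed, duplex, tx_pause, rx_pause);
	example_start_datapath(priv);
}
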
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 4d16c13d0df5..64cb4b04be7a 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -220,6 +220,8 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* occurs.
* @reset_done: optional function to call after transport specific reset
* operation has finished.
+ * @shutdown: synchronize with the device on shutdown. If provided, replaces
+ * the virtio core implementation.
*/
struct virtio_driver {
struct device_driver driver;
@@ -237,6 +239,7 @@ struct virtio_driver {
int (*restore)(struct virtio_device *dev);
int (*reset_prepare)(struct virtio_device *dev);
int (*reset_done)(struct virtio_device *dev);
+ void (*shutdown)(struct virtio_device *dev);
};
#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)
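
A minimal sketch of a driver supplying its own shutdown method, bypassing
the core's mark-vqs-broken fallback shown above; the body and names are
hypothetical:

static void example_shutdown(struct virtio_device *vdev)
{
	/* Quiesce DMA and interrupts without resetting the device. */
}

static struct virtio_driver example_virtio_driver = {
	.driver.name	= "virtio-example",
	.shutdown	= example_shutdown,
};
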
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index d9d0cb827117..f030adc462ee 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -53,43 +53,70 @@ struct landlock_ruleset_attr {
__u64 scoped;
};
-/*
- * sys_landlock_create_ruleset() flags:
+/**
+ * DOC: landlock_create_ruleset_flags
+ *
+ * **Flags**
*
- * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
- * version.
- * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues.
+ * %LANDLOCK_CREATE_RULESET_VERSION
+ * Get the highest supported Landlock ABI version (starting at 1).
+ *
+ * %LANDLOCK_CREATE_RULESET_ERRATA
+ * Get a bitmask of fixed issues for the current Landlock ABI version.
*/
/* clang-format off */
#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
/* clang-format on */
-/*
- * sys_landlock_restrict_self() flags:
- *
- * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF: Do not create any log related to the
- * enforced restrictions. This should only be set by tools launching unknown
- * or untrusted programs (e.g. a sandbox tool, container runtime, system
- * service manager). Because programs sandboxing themselves should fix any
- * denied access, they should not set this flag to be aware of potential
- * issues reported by system's logs (i.e. audit).
- * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON: Explicitly ask to continue
- * logging denied access requests even after an :manpage:`execve(2)` call.
- * This flag should only be set if all the programs than can legitimately be
- * executed will not try to request a denied access (which could spam audit
- * logs).
- * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF: Do not create any log related
- * to the enforced restrictions coming from future nested domains created by
- * the caller or its descendants. This should only be set according to a
- * runtime configuration (i.e. not hardcoded) by programs launching other
- * unknown or untrusted programs that may create their own Landlock domains
- * and spam logs. The main use case is for container runtimes to enable users
- * to mute buggy sandboxed programs for a specific container image. Other use
- * cases include sandboxer tools and init systems. Unlike
- * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF,
- * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF does not impact the requested
- * restriction (if any) but only the future nested domains.
+/**
+ * DOC: landlock_restrict_self_flags
+ *
+ * **Flags**
+ *
+ * By default, denied accesses originating from programs that sandbox themselves
+ * are logged via the audit subsystem. Such events typically indicate unexpected
+ * behavior, such as bugs or exploitation attempts. However, to avoid excessive
+ * logging, access requests denied by a domain not created by the originating
+ * program are not logged by default. The rationale is that programs should know
+ * their own behavior, but not necessarily the behavior of other programs. This
+ * default configuration is suitable for most programs that sandbox themselves.
+ * For specific use cases, the following flags allow programs to modify this
+ * default logging behavior.
+ *
+ * The %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF and
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON flags apply to the newly created
+ * Landlock domain.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * Disables logging of denied accesses originating from the thread creating
+ * the Landlock domain, as well as its children, as long as they continue
+ * running the same executable code (i.e., without an intervening
+ * :manpage:`execve(2)` call). This is intended for programs that execute
+ * unknown code without invoking :manpage:`execve(2)`, such as script
+ * interpreters. Programs that only sandbox themselves should not set this
+ * flag, so users can be notified of unauthorized access attempts via system
+ * logs.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * Enables logging of denied accesses after an :manpage:`execve(2)` call,
+ * providing visibility into unauthorized access attempts by newly executed
+ * programs within the created Landlock domain. This flag is recommended
+ * only when all potential executables in the domain are expected to comply
+ * with the access restrictions, as excessive audit log entries could make
+ * it more difficult to identify critical events.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+ * Disables logging of denied accesses originating from nested Landlock
+ * domains created by the caller or its descendants. This flag should be set
+ * according to runtime configuration, not hardcoded, to avoid suppressing
+ * important security events. It is useful for container runtimes or
+ * sandboxing tools that may launch programs which themselves create
+ * Landlock domains and could otherwise generate excessive logs. Unlike
+ * ``LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF``, this flag only affects
+ * future nested domains, not the one being created. It can also be used
+ * with a @ruleset_fd value of -1 to mute subdomain logs without creating a
+ * domain.
*/
/* clang-format off */
#define LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF (1U << 0)
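
A minimal userspace sketch of the @ruleset_fd == -1 usage described above:
muting logs from future nested domains without enforcing a new ruleset
(assumes no_new_privs is already set):

#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_mute_subdomain_logs(void)
{
	/* No ruleset: only the logging behavior of future
	 * nested domains is changed. */
	return syscall(SYS_landlock_restrict_self, -1,
		       LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
}
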
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b95dd84eef2d..d4b3e2ae1314 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -28,10 +28,10 @@
/* Set current process as the (exclusive) owner of this file descriptor. This
* must be called before any other vhost command. Further calls to
- * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */
+ * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
/* Give up ownership, and reset the device to default values.
- * Allows subsequent call to VHOST_OWNER_SET to succeed. */
+ * Allows subsequent call to VHOST_SET_OWNER to succeed. */
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
/* Set up/modify memory layout */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 8549d4571257..c691ac210ce2 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -246,6 +246,7 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SELF 0x0
#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
/* Transitional device admin command. */
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index 41ff44dfa1db..f52de5ed1b3b 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,4 +107,10 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/*
+ * Some UFS devices may need more time in the hibern8 state before
+ * exiting it. Enable this quirk to allow an additional 100 us.
+ */
+#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 3caf2cd86e65..63e5b90da1f3 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -90,7 +90,7 @@
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
-#ifdef CONFIG_PROVE_RCU
+#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP)
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif
@@ -2353,9 +2353,37 @@ static struct file_system_type cgroup2_fs_type = {
};
#ifdef CONFIG_CPUSETS_V1
+enum cpuset_param {
+ Opt_cpuset_v2_mode,
+};
+
+static const struct fs_parameter_spec cpuset_fs_parameters[] = {
+ fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
+ {}
+};
+
+static int cpuset_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, cpuset_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_cpuset_v2_mode:
+ ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static const struct fs_context_operations cpuset_fs_context_ops = {
.get_tree = cgroup1_get_tree,
.free = cgroup_fs_context_free,
+ .parse_param = cpuset_parse_param,
};
/*
@@ -2392,6 +2420,7 @@ static int cpuset_init_fs_context(struct fs_context *fc)
static struct file_system_type cpuset_fs_type = {
.name = "cpuset",
.init_fs_context = cpuset_init_fs_context,
+ .parameters = cpuset_fs_parameters,
.fs_flags = FS_USERNS_MOUNT,
};
#endif
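
The parser restored above re-enables the v1 "cpuset_v2_mode" mount option;
a hypothetical sketch of exercising it through mount(2), where the target
path is arbitrary:

#include <sys/mount.h>

static int example_mount_cpuset(void)
{
	/* The option string is eventually parsed by cpuset_parse_param(). */
	return mount("cpuset", "/dev/cpuset", "cpuset", 0,
		     "cpuset_v2_mode");
}
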
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 66bcd40a28ca..fdbf249d1c68 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -163,7 +163,7 @@ enum scx_ops_flags {
/*
* CPU cgroup support flags
*/
- SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
+ SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* DEPRECATED, will be removed in 6.18 */
SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
SCX_OPS_ENQ_LAST |
@@ -3899,35 +3899,6 @@ bool scx_can_stop_tick(struct rq *rq)
DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
static bool scx_cgroup_enabled;
-static bool cgroup_warned_missing_weight;
-static bool cgroup_warned_missing_idle;
-
-static void scx_cgroup_warn_missing_weight(struct task_group *tg)
-{
- if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
- cgroup_warned_missing_weight)
- return;
-
- if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
- return;
-
- pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
- scx_ops.name);
- cgroup_warned_missing_weight = true;
-}
-
-static void scx_cgroup_warn_missing_idle(struct task_group *tg)
-{
- if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
- return;
-
- if (!tg->idle)
- return;
-
- pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
- scx_ops.name);
- cgroup_warned_missing_idle = true;
-}
int scx_tg_online(struct task_group *tg)
{
@@ -3937,8 +3908,6 @@ int scx_tg_online(struct task_group *tg)
percpu_down_read(&scx_cgroup_rwsem);
- scx_cgroup_warn_missing_weight(tg);
-
if (scx_cgroup_enabled) {
if (SCX_HAS_OP(cgroup_init)) {
struct scx_cgroup_init_args args =
@@ -4076,9 +4045,7 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight)
void scx_group_set_idle(struct task_group *tg, bool idle)
{
- percpu_down_read(&scx_cgroup_rwsem);
- scx_cgroup_warn_missing_idle(tg);
- percpu_up_read(&scx_cgroup_rwsem);
+ /* TODO: Implement ops->cgroup_set_idle() */
}
static void scx_cgroup_lock(void)
@@ -4272,9 +4239,6 @@ static int scx_cgroup_init(void)
percpu_rwsem_assert_held(&scx_cgroup_rwsem);
- cgroup_warned_missing_weight = false;
- cgroup_warned_missing_idle = false;
-
/*
* scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
* cgroups and init, all online cgroups are initialized.
@@ -4284,9 +4248,6 @@ static int scx_cgroup_init(void)
struct task_group *tg = css_tg(css);
struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
- scx_cgroup_warn_missing_weight(tg);
- scx_cgroup_warn_missing_idle(tg);
-
if ((tg->scx_flags &
(SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
continue;
@@ -4623,7 +4584,7 @@ unlock:
static void free_exit_info(struct scx_exit_info *ei)
{
- kfree(ei->dump);
+ kvfree(ei->dump);
kfree(ei->msg);
kfree(ei->bt);
kfree(ei);
@@ -4639,7 +4600,7 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
- ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
+ ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
if (!ei->bt || !ei->msg || !ei->dump) {
free_exit_info(ei);
@@ -5252,6 +5213,9 @@ static int validate_ops(const struct sched_ext_ops *ops)
return -EINVAL;
}
+ if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
+ pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
+
return 0;
}
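
The dump-buffer fix above switches to kvzalloc() because exit_dump_len can
be large enough for a high-order kmalloc to fail; kv*-allocated memory must
then be released with kvfree(). A minimal sketch of the pairing:

static void *example_alloc_dump(size_t len)
{
	/* Falls back to vmalloc when a contiguous kmalloc would fail. */
	return kvzalloc(len, GFP_KERNEL);
}

static void example_free_dump(void *dump)
{
	/* Handles both kmalloc- and vmalloc-backed allocations. */
	kvfree(dump);
}
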
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index 2ef2e1b80091..2f844c279a3e 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
* @arg: data to be passed to fn and handled_kill
* @name: the thread's name
*
- * This returns a specialized task for use by the vhost layer or NULL on
+ * This returns a specialized task for use by the vhost layer or ERR_PTR() on
* failure. The returned task is inactive, and the caller must fire it up
* through vhost_task_start().
*/
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index e39a459540ec..60f27cb4e54f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -333,6 +333,8 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst;
int ret;
+ local_bh_disable();
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
@@ -348,8 +350,10 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
- lwtstate->type > LWTUNNEL_ENCAP_MAX)
- return 0;
+ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
+ ret = 0;
+ goto out;
+ }
ret = -EOPNOTSUPP;
rcu_read_lock();
@@ -364,11 +368,13 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (ret == -EOPNOTSUPP)
goto drop;
- return ret;
+ goto out;
drop:
kfree_skb(skb);
+out:
+ local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(lwtunnel_output);
@@ -380,6 +386,8 @@ int lwtunnel_xmit(struct sk_buff *skb)
struct dst_entry *dst;
int ret;
+ local_bh_disable();
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
@@ -396,8 +404,10 @@ int lwtunnel_xmit(struct sk_buff *skb)
lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
- lwtstate->type > LWTUNNEL_ENCAP_MAX)
- return 0;
+ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
+ ret = 0;
+ goto out;
+ }
ret = -EOPNOTSUPP;
rcu_read_lock();
@@ -412,11 +422,13 @@ int lwtunnel_xmit(struct sk_buff *skb)
if (ret == -EOPNOTSUPP)
goto drop;
- return ret;
+ goto out;
drop:
kfree_skb(skb);
+out:
+ local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(lwtunnel_xmit);
@@ -428,6 +440,8 @@ int lwtunnel_input(struct sk_buff *skb)
struct dst_entry *dst;
int ret;
+ DEBUG_NET_WARN_ON_ONCE(!in_softirq());
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
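
The pattern introduced above wraps the whole path in local_bh_disable()/
local_bh_enable() so the per-CPU dev_xmit_recursion counter is used
consistently, and every early return is funneled through a label to keep
the pair balanced. A condensed sketch with hypothetical helpers:

int example_output(struct sk_buff *skb)
{
	int ret;

	local_bh_disable();

	if (example_nothing_to_do(skb)) {	/* hypothetical check */
		ret = 0;
		goto out;
	}

	ret = example_do_encap(skb);		/* hypothetical */
out:
	local_bh_enable();
	return ret;
}
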
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 5d7af50fe702..230743bdbb14 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -861,14 +861,17 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&priv->lock);
+ err = 0;
netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
- if (!netdev || !netif_device_present(netdev)) {
+ if (!netdev) {
err = -ENODEV;
goto err_unlock_sock;
}
-
- if (!netdev_need_ops_lock(netdev)) {
+ if (!netif_device_present(netdev))
+ err = -ENODEV;
+ else if (!netdev_need_ops_lock(netdev))
err = -EOPNOTSUPP;
+ if (err) {
NL_SET_BAD_ATTR(info->extack,
info->attrs[NETDEV_A_DEV_IFINDEX]);
goto err_unlock;
diff --git a/net/core/selftests.c b/net/core/selftests.c
index e99ae983fca9..35f807ea9952 100644
--- a/net/core/selftests.c
+++ b/net/core/selftests.c
@@ -100,10 +100,10 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
ehdr->h_proto = htons(ETH_P_IP);
if (attr->tcp) {
+ memset(thdr, 0, sizeof(*thdr));
thdr->source = htons(attr->sport);
thdr->dest = htons(attr->dport);
thdr->doff = sizeof(struct tcphdr) / 4;
- thdr->check = 0;
} else {
uhdr->source = htons(attr->sport);
uhdr->dest = htons(attr->dport);
@@ -144,10 +144,18 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
attr->id = net_test_next_id;
shdr->id = net_test_next_id++;
- if (attr->size)
- skb_put(skb, attr->size);
- if (attr->max_size && attr->max_size > skb->len)
- skb_put(skb, attr->max_size - skb->len);
+ if (attr->size) {
+ void *payload = skb_put(skb, attr->size);
+
+ memset(payload, 0, attr->size);
+ }
+
+ if (attr->max_size && attr->max_size > skb->len) {
+ size_t pad_len = attr->max_size - skb->len;
+ void *pad = skb_put(skb, pad_len);
+
+ memset(pad, 0, pad_len);
+ }
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index 2cb62f026b1f..a715dcbe0146 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -337,7 +337,11 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
release_sock(sk);
- sock_kfree_s(sk, match, sizeof(*match));
+ kfree_rcu_mightsleep(match);
+ /* Adjust sk_omem_alloc like sock_kfree_s() does, to match the
+ * allocation of this memory done by sock_kmemdup().
+ */
+ atomic_sub(sizeof(*match), &sk->sk_omem_alloc);
err = 0;
out:
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index ce5045eea065..6c8ef826cec0 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -961,6 +961,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) {
int old_flags;
+ int len = 0;
if (parentid) {
if (cl->cl_parent &&
@@ -991,9 +992,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (usc != NULL)
hfsc_change_usc(cl, usc, cur_time);
+ if (cl->qdisc->q.qlen != 0)
+ len = qdisc_peek_len(cl->qdisc);
+ /* Check queue length again since some qdisc implementations
+ * (e.g., netem/codel) might empty the queue during the peek
+ * operation.
+ */
if (cl->qdisc->q.qlen != 0) {
- int len = qdisc_peek_len(cl->qdisc);
-
if (cl->cl_flags & HFSC_RSC) {
if (old_flags & HFSC_RSC)
update_ed(cl, len);
@@ -1636,10 +1641,16 @@ hfsc_dequeue(struct Qdisc *sch)
if (cl->qdisc->q.qlen != 0) {
/* update ed */
next_len = qdisc_peek_len(cl->qdisc);
- if (realtime)
- update_ed(cl, next_len);
- else
- update_d(cl, next_len);
+ /* Check queue length again since some qdisc implementations
+ * (e.g., netem/codel) might empty the queue during the peek
+ * operation.
+ */
+ if (cl->qdisc->q.qlen != 0) {
+ if (realtime)
+ update_ed(cl, next_len);
+ else
+ update_d(cl, next_len);
+ }
} else {
/* the class becomes passive */
eltree_remove(cl);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index e2f19627e43d..b45c5b91bc7a 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -716,7 +716,8 @@ void tipc_mon_reinit_self(struct net *net)
if (!mon)
continue;
write_lock_bh(&mon->lock);
- mon->self->addr = tipc_own_addr(net);
+ if (mon->self)
+ mon->self->addr = tipc_own_addr(net);
write_unlock_bh(&mon->lock);
}
}
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index d75897559d18..2d6e59561c9d 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -15,7 +15,7 @@ KBUILD_CFLAGS += -Werror=return-type
KBUILD_CFLAGS += -Werror=strict-prototypes
KBUILD_CFLAGS += -Wno-format-security
KBUILD_CFLAGS += -Wno-trigraphs
-KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, frame-address)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += -Wmissing-declarations
KBUILD_CFLAGS += -Wmissing-prototypes
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f3e7ac513db3..f99ab1a3b0f0 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -245,7 +245,9 @@ static int process_measurement(struct file *file, const struct cred *cred,
&allowed_algos);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK ||
func == MMAP_CHECK_REQPROT) &&
- (ima_policy_flag & IMA_MEASURE));
+ (ima_policy_flag & IMA_MEASURE) &&
+ ((action & IMA_MEASURE) ||
+ (file->f_mode & FMODE_WRITE)));
if (!action && !violation_check)
return 0;
diff --git a/security/landlock/domain.c b/security/landlock/domain.c
index bae2e9909013..a647b68e8d06 100644
--- a/security/landlock/domain.c
+++ b/security/landlock/domain.c
@@ -16,6 +16,7 @@
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
+#include <linux/signal.h>
#include <linux/uidgid.h>
#include "access.h"
@@ -99,8 +100,7 @@ static struct landlock_details *get_current_details(void)
return ERR_PTR(-ENOMEM);
memcpy(details->exe_path, path_str, path_size);
- WARN_ON_ONCE(current_cred() != current_real_cred());
- details->pid = get_pid(task_pid(current));
+ details->pid = get_pid(task_tgid(current));
details->uid = from_kuid(&init_user_ns, current_uid());
get_task_comm(details->comm, current);
return details;
diff --git a/security/landlock/domain.h b/security/landlock/domain.h
index ed0d348e214c..7fb70b25f85a 100644
--- a/security/landlock/domain.h
+++ b/security/landlock/domain.h
@@ -130,7 +130,7 @@ int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy);
static inline void
landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy)
{
- if (WARN_ON_ONCE(!hierarchy || !hierarchy->details))
+ if (!hierarchy || !hierarchy->details)
return;
put_pid(hierarchy->details->pid);
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 54a9f29e6ebb..b9561e3417ae 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -169,20 +169,16 @@ const int landlock_abi_version = 7;
* the new ruleset.
* @size: Size of the pointed &struct landlock_ruleset_attr (needed for
* backward and forward compatibility).
- * @flags: Supported value:
+ * @flags: Supported values:
+ *
* - %LANDLOCK_CREATE_RULESET_VERSION
* - %LANDLOCK_CREATE_RULESET_ERRATA
*
* This system call creates a new Landlock ruleset, and returns the
* related file descriptor on success.
*
- * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
- * 0, then the returned value is the highest supported Landlock ABI version
- * (starting at 1).
- *
- * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is
- * 0, then the returned value is a bitmask of fixed issues for the current
- * Landlock ABI version.
+ * If %LANDLOCK_CREATE_RULESET_VERSION or %LANDLOCK_CREATE_RULESET_ERRATA is
+ * set, then @attr must be NULL and @size must be 0.
*
* Possible returned errors are:
*
@@ -191,6 +187,9 @@ const int landlock_abi_version = 7;
* - %E2BIG: @attr or @size inconsistencies;
* - %EFAULT: @attr or @size inconsistencies;
* - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_create_ruleset_flags
*/
SYSCALL_DEFINE3(landlock_create_ruleset,
const struct landlock_ruleset_attr __user *const, attr,
@@ -452,18 +451,15 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
* @flags: Supported values:
*
- * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
- * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
- * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
*
* This system call enforces a Landlock ruleset on the current
* thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its
* namespace or is running with no_new_privs. This avoids scenarios where
* unprivileged tasks can affect the behavior of privileged children.
*
- * It is allowed to only pass the %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
- * flag with a @ruleset_fd value of -1.
- *
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
@@ -475,6 +471,9 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* %CAP_SYS_ADMIN in its namespace.
* - %E2BIG: The maximum number of stacked rulesets is reached for the current
* thread.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_restrict_self_flags
*/
SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
flags)
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 2c720e3ecad5..fdc7170639e6 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -950,5 +950,5 @@ SCX_OPS_DEFINE(flatcg_ops,
.cgroup_move = (void *)fcg_cgroup_move,
.init = (void *)fcg_init,
.exit = (void *)fcg_exit,
- .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
+ .flags = SCX_OPS_ENQ_EXITING,
.name = "flatcg");
diff --git a/tools/testing/selftests/landlock/audit.h b/tools/testing/selftests/landlock/audit.h
index b9054086a0c9..18a6014920b5 100644
--- a/tools/testing/selftests/landlock/audit.h
+++ b/tools/testing/selftests/landlock/audit.h
@@ -300,15 +300,22 @@ out:
return err;
}
-static int __maybe_unused matches_log_domain_allocated(int audit_fd,
+static int __maybe_unused matches_log_domain_allocated(int audit_fd, pid_t pid,
__u64 *domain_id)
{
- return audit_match_record(
- audit_fd, AUDIT_LANDLOCK_DOMAIN,
- REGEX_LANDLOCK_PREFIX
- " status=allocated mode=enforcing pid=[0-9]\\+ uid=[0-9]\\+"
- " exe=\"[^\"]\\+\" comm=\".*_test\"$",
- domain_id);
+ static const char log_template[] = REGEX_LANDLOCK_PREFIX
+ " status=allocated mode=enforcing pid=%d uid=[0-9]\\+"
+ " exe=\"[^\"]\\+\" comm=\".*_test\"$";
+ char log_match[sizeof(log_template) + 10];
+ int log_match_len;
+
+ log_match_len =
+ snprintf(log_match, sizeof(log_match), log_template, pid);
+ if (log_match_len >= sizeof(log_match))
+ return -E2BIG;
+
+ return audit_match_record(audit_fd, AUDIT_LANDLOCK_DOMAIN, log_match,
+ domain_id);
}
static int __maybe_unused matches_log_domain_deallocated(
diff --git a/tools/testing/selftests/landlock/audit_test.c b/tools/testing/selftests/landlock/audit_test.c
index a0643070c403..cfc571afd0eb 100644
--- a/tools/testing/selftests/landlock/audit_test.c
+++ b/tools/testing/selftests/landlock/audit_test.c
@@ -9,6 +9,7 @@
#include <errno.h>
#include <limits.h>
#include <linux/landlock.h>
+#include <pthread.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/prctl.h>
@@ -40,7 +41,6 @@ FIXTURE(audit)
{
struct audit_filter audit_filter;
int audit_fd;
- __u64(*domain_stack)[16];
};
FIXTURE_SETUP(audit)
@@ -60,18 +60,10 @@ FIXTURE_SETUP(audit)
TH_LOG("Failed to initialize audit: %s", error_msg);
}
clear_cap(_metadata, CAP_AUDIT_CONTROL);
-
- self->domain_stack = mmap(NULL, sizeof(*self->domain_stack),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- ASSERT_NE(MAP_FAILED, self->domain_stack);
- memset(self->domain_stack, 0, sizeof(*self->domain_stack));
}
FIXTURE_TEARDOWN(audit)
{
- EXPECT_EQ(0, munmap(self->domain_stack, sizeof(*self->domain_stack)));
-
set_cap(_metadata, CAP_AUDIT_CONTROL);
EXPECT_EQ(0, audit_cleanup(self->audit_fd, &self->audit_filter));
clear_cap(_metadata, CAP_AUDIT_CONTROL);
@@ -83,9 +75,15 @@ TEST_F(audit, layers)
.scoped = LANDLOCK_SCOPE_SIGNAL,
};
int status, ruleset_fd, i;
+ __u64(*domain_stack)[16];
__u64 prev_dom = 3;
pid_t child;
+ domain_stack = mmap(NULL, sizeof(*domain_stack), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(MAP_FAILED, domain_stack);
+ memset(domain_stack, 0, sizeof(*domain_stack));
+
ruleset_fd =
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
ASSERT_LE(0, ruleset_fd);
@@ -94,7 +92,7 @@ TEST_F(audit, layers)
child = fork();
ASSERT_LE(0, child);
if (child == 0) {
- for (i = 0; i < ARRAY_SIZE(*self->domain_stack); i++) {
+ for (i = 0; i < ARRAY_SIZE(*domain_stack); i++) {
__u64 denial_dom = 1;
__u64 allocated_dom = 2;
@@ -107,7 +105,8 @@ TEST_F(audit, layers)
matches_log_signal(_metadata, self->audit_fd,
getppid(), &denial_dom));
EXPECT_EQ(0, matches_log_domain_allocated(
- self->audit_fd, &allocated_dom));
+ self->audit_fd, getpid(),
+ &allocated_dom));
EXPECT_NE(denial_dom, 1);
EXPECT_NE(denial_dom, 0);
EXPECT_EQ(denial_dom, allocated_dom);
@@ -115,7 +114,7 @@ TEST_F(audit, layers)
/* Checks that the new domain is younger than the previous one. */
EXPECT_GT(allocated_dom, prev_dom);
prev_dom = allocated_dom;
- (*self->domain_stack)[i] = allocated_dom;
+ (*domain_stack)[i] = allocated_dom;
}
/* Checks that we reached the maximum number of layers. */
@@ -142,23 +141,143 @@ TEST_F(audit, layers)
/* Purges log from deallocated domains. */
EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
&audit_tv_dom_drop, sizeof(audit_tv_dom_drop)));
- for (i = ARRAY_SIZE(*self->domain_stack) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(*domain_stack) - 1; i >= 0; i--) {
__u64 deallocated_dom = 2;
EXPECT_EQ(0, matches_log_domain_deallocated(self->audit_fd, 1,
&deallocated_dom));
- EXPECT_EQ((*self->domain_stack)[i], deallocated_dom)
+ EXPECT_EQ((*domain_stack)[i], deallocated_dom)
{
TH_LOG("Failed to match domain %llx (#%d)",
- (*self->domain_stack)[i], i);
+ (*domain_stack)[i], i);
}
}
+ EXPECT_EQ(0, munmap(domain_stack, sizeof(*domain_stack)));
EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
&audit_tv_default, sizeof(audit_tv_default)));
-
EXPECT_EQ(0, close(ruleset_fd));
}
+struct thread_data {
+ pid_t parent_pid;
+ int ruleset_fd, pipe_child, pipe_parent;
+};
+
+static void *thread_audit_test(void *arg)
+{
+ const struct thread_data *data = (struct thread_data *)arg;
+ uintptr_t err = 0;
+ char buffer;
+
+ /* TGID and TID are different for a second thread. */
+ if (getpid() == gettid()) {
+ err = 1;
+ goto out;
+ }
+
+ if (landlock_restrict_self(data->ruleset_fd, 0)) {
+ err = 2;
+ goto out;
+ }
+
+ if (close(data->ruleset_fd)) {
+ err = 3;
+ goto out;
+ }
+
+ /* Creates a denial to get the domain ID. */
+ if (kill(data->parent_pid, 0) != -1) {
+ err = 4;
+ goto out;
+ }
+
+ if (EPERM != errno) {
+ err = 5;
+ goto out;
+ }
+
+ /* Signals the parent to read denial logs. */
+ if (write(data->pipe_child, ".", 1) != 1) {
+ err = 6;
+ goto out;
+ }
+
+ /* Waits for the parent to update audit filters. */
+ if (read(data->pipe_parent, &buffer, 1) != 1) {
+ err = 7;
+ goto out;
+ }
+
+out:
+ close(data->pipe_child);
+ close(data->pipe_parent);
+ return (void *)err;
+}
+
+/* Checks that the PID tied to a domain is not a TID but the TGID. */
+TEST_F(audit, thread)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .scoped = LANDLOCK_SCOPE_SIGNAL,
+ };
+ __u64 denial_dom = 1;
+ __u64 allocated_dom = 2;
+ __u64 deallocated_dom = 3;
+ pthread_t thread;
+ int pipe_child[2], pipe_parent[2];
+ char buffer;
+ struct thread_data child_data;
+
+ child_data.parent_pid = getppid();
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+ child_data.pipe_child = pipe_child[1];
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ child_data.pipe_parent = pipe_parent[0];
+ child_data.ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, child_data.ruleset_fd);
+
+ /* TGID and TID are the same for the initial thread. */
+ EXPECT_EQ(getpid(), gettid());
+ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, pthread_create(&thread, NULL, thread_audit_test,
+ &child_data));
+
+ /* Waits for the child to generate a denial. */
+ ASSERT_EQ(1, read(pipe_child[0], &buffer, 1));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ /* Matches the signal log to get the domain ID. */
+ EXPECT_EQ(0, matches_log_signal(_metadata, self->audit_fd,
+ child_data.parent_pid, &denial_dom));
+ EXPECT_NE(denial_dom, 1);
+ EXPECT_NE(denial_dom, 0);
+
+ EXPECT_EQ(0, matches_log_domain_allocated(self->audit_fd, getpid(),
+ &allocated_dom));
+ EXPECT_EQ(denial_dom, allocated_dom);
+
+ /* Updates filter rules to match the drop record. */
+ set_cap(_metadata, CAP_AUDIT_CONTROL);
+ EXPECT_EQ(0, audit_filter_drop(self->audit_fd, AUDIT_ADD_RULE));
+ EXPECT_EQ(0, audit_filter_exe(self->audit_fd, &self->audit_filter,
+ AUDIT_DEL_RULE));
+ clear_cap(_metadata, CAP_AUDIT_CONTROL);
+
+ /* Signals the thread to exit, which will generate a domain deallocation. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ ASSERT_EQ(0, pthread_join(thread, NULL));
+
+ EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
+ &audit_tv_dom_drop, sizeof(audit_tv_dom_drop)));
+ EXPECT_EQ(0, matches_log_domain_deallocated(self->audit_fd, 1,
+ &deallocated_dom));
+ EXPECT_EQ(denial_dom, deallocated_dom);
+ EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
+ &audit_tv_default, sizeof(audit_tv_default)));
+}
+
FIXTURE(audit_flags)
{
struct audit_filter audit_filter;
@@ -273,7 +392,8 @@ TEST_F(audit_flags, signal)
/* Checks domain information records. */
EXPECT_EQ(0, matches_log_domain_allocated(
- self->audit_fd, &allocated_dom));
+ self->audit_fd, getpid(),
+ &allocated_dom));
EXPECT_NE(*self->domain_id, 1);
EXPECT_NE(*self->domain_id, 0);
EXPECT_EQ(*self->domain_id, allocated_dom);
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index f819011a8798..73729382d40f 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -5964,7 +5964,8 @@ TEST_F(audit_layout1, refer_handled)
EXPECT_EQ(EXDEV, errno);
EXPECT_EQ(0, matches_log_fs(_metadata, self->audit_fd, "fs\\.refer",
dir_s1d1));
- EXPECT_EQ(0, matches_log_domain_allocated(self->audit_fd, NULL));
+ EXPECT_EQ(0,
+ matches_log_domain_allocated(self->audit_fd, getpid(), NULL));
EXPECT_EQ(0, matches_log_fs(_metadata, self->audit_fd, "fs\\.refer",
dir_s1d3));
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 4f55477ffe08..e7a75341f0f3 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -206,9 +206,8 @@ chk_dump_one()
local token
local msg
- ss_token="$(ss -inmHMN $ns | grep 'token:' |\
- head -n 1 |\
- sed 's/.*token:\([0-9a-f]*\).*/\1/')"
+ ss_token="$(ss -inmHMN $ns |
+ mptcp_lib_get_info_value "token" "token")"
token="$(ip netns exec $ns ./mptcp_diag -t $ss_token |\
awk -F':[ \t]+' '/^token/ {print $2}')"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index d4ea9cd845a3..e26bbc169783 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -313,5 +313,44 @@
"$TC qdisc del dev $DUMMY handle 1: root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
+ },
+ {
+ "id": "a4c3",
+ "name": "Test HFSC with netem/blackhole - queue emptying during peek operation",
+ "category": [
+ "qdisc",
+ "hfsc",
+ "netem",
+ "blackhole"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.10.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY handle 1:0 root drr",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:1 drr",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:2 drr",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 plug limit 1024",
+ "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 hfsc default 1",
+ "$TC class add dev $DUMMY parent 3:0 classid 3:1 hfsc rt m1 5Mbit d 10ms m2 10Mbit",
+ "$TC qdisc add dev $DUMMY parent 3:1 handle 4:0 netem delay 1ms",
+ "$TC qdisc add dev $DUMMY parent 4:1 handle 5:0 blackhole",
+ "ping -c 3 -W 0.01 -i 0.001 -s 1 10.10.10.10 -I $DUMMY > /dev/null 2>&1 || true",
+ "$TC class change dev $DUMMY parent 3:0 classid 3:1 hfsc sc m1 5Mbit d 10ms m2 10Mbit",
+ "$TC class del dev $DUMMY parent 3:0 classid 3:1",
+ "$TC class add dev $DUMMY parent 3:0 classid 3:1 hfsc rt m1 5Mbit d 10ms m2 10Mbit",
+ "ping -c 3 -W 0.01 -i 0.001 -s 1 10.10.10.10 -I $DUMMY > /dev/null 2>&1 || true"
+ ],
+ "cmdUnderTest": "$TC class change dev $DUMMY parent 3:0 classid 3:1 hfsc sc m1 5Mbit d 10ms m2 10Mbit",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc hfsc 3:.*parent 1:2.*default 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root",
+ "$IP addr del 10.10.10.10/24 dev $DUMMY || true"
+ ]
}
]