-rw-r--r--  .mailmap | 3
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  arch/arm/boot/dts/am33xx-l4.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/am5748.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/integratorap-im-pd1.dts | 1
-rw-r--r--  arch/arm/boot/dts/integratorap.dts | 9
-rw-r--r--  arch/arm/mach-sunplus/Kconfig | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sc7280.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150.dtsi | 24
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8350.dtsi | 2
-rw-r--r--  arch/loongarch/include/asm/loongson.h | 2
-rw-r--r--  arch/loongarch/kernel/head.S | 2
-rw-r--r--  arch/loongarch/kernel/traps.c | 15
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 9
-rw-r--r--  arch/x86/events/intel/core.c | 40
-rw-r--r--  arch/x86/events/intel/ds.c | 9
-rw-r--r--  arch/x86/events/perf_event.h | 2
-rw-r--r--  arch/x86/include/asm/intel-family.h | 3
-rw-r--r--  arch/x86/include/asm/smp.h | 25
-rw-r--r--  arch/x86/kernel/alternative.c | 45
-rw-r--r--  arch/x86/kernel/cpu/sgx/encl.c | 5
-rw-r--r--  arch/x86/kernel/cpu/sgx/main.c | 15
-rw-r--r--  arch/x86/kvm/cpuid.c | 2
-rw-r--r--  arch/x86/lib/usercopy.c | 2
-rw-r--r--  arch/x86/mm/Makefile | 3
-rw-r--r--  drivers/acpi/processor_idle.c | 23
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/ata/libata-sata.c | 24
-rw-r--r--  drivers/ata/libata-scsi.c | 10
-rw-r--r--  drivers/block/virtio_blk.c | 11
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 12
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx93.c | 2
-rw-r--r--  drivers/clk/ingenic/tcu.c | 15
-rw-r--r--  drivers/clk/microchip/clk-mpfs.c | 11
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 8
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c | 26
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 264
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 155
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 89
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 13
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 15
-rw-r--r--  drivers/input/keyboard/iqs62x-keys.c | 3
-rw-r--r--  drivers/input/keyboard/snvs_pwrkey.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 1
-rw-r--r--  drivers/input/touchscreen/melfas_mip4.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 8
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 14
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 2
-rw-r--r--  drivers/media/dvb-core/dvb_vb2.c | 11
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c | 9
-rw-r--r--  drivers/media/usb/uvc/uvc_driver.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c | 26
-rw-r--r--  drivers/mmc/core/sd.c | 3
-rw-r--r--  drivers/mmc/host/mmc_hsq.c | 2
-rw-r--r--  drivers/mmc/host/moxart-mmc.c | 17
-rw-r--r--  drivers/mmc/host/sdhci.c | 4
-rw-r--r--  drivers/net/can/c_can/c_can.h | 17
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 11
-rw-r--r--  drivers/net/dsa/mt7530.c | 19
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 163
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 23
-rw-r--r--  drivers/net/hippi/rrunner.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/tun.c | 9
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/usbnet.c | 7
-rw-r--r--  drivers/nvme/host/core.c | 6
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/reset/reset-imx7.c | 1
-rw-r--r--  drivers/reset/reset-microchip-sparx5.c | 22
-rw-r--r--  drivers/reset/reset-npcm.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 3
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c | 1
-rw-r--r--  drivers/soc/sunxi/sunxi_sram.c | 23
-rw-r--r--  drivers/staging/media/rkvdec/rkvdec-h264.c | 4
-rw-r--r--  drivers/thunderbolt/switch.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-st.c | 2
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 21
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 2
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c | 4
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 17
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c | 9
-rw-r--r--  fs/coredump.c | 38
-rw-r--r--  fs/ext4/mballoc.c | 3
-rw-r--r--  fs/internal.h | 3
-rw-r--r--  fs/ntfs/super.c | 3
-rw-r--r--  fs/pstore/platform.c | 63
-rw-r--r--  fs/read_write.c | 22
-rw-r--r--  fs/xfs/xfs_notify_failure.c | 6
-rw-r--r--  include/linux/libata.h | 4
-rw-r--r--  include/linux/memcontrol.h | 45
-rw-r--r--  include/linux/memremap.h | 5
-rw-r--r--  include/uapi/linux/if_tun.h | 2
-rw-r--r--  io_uring/io_uring.c | 7
-rw-r--r--  io_uring/poll.c | 2
-rw-r--r--  kernel/events/core.c | 9
-rw-r--r--  mm/damon/dbgfs.c | 19
-rw-r--r--  mm/damon/sysfs.c | 2
-rw-r--r--  mm/frontswap.c | 3
-rw-r--r--  mm/gup.c | 34
-rw-r--r--  mm/huge_memory.c | 6
-rw-r--r--  mm/hugetlb.c | 14
-rw-r--r--  mm/khugepaged.c | 10
-rw-r--r--  mm/madvise.c | 7
-rw-r--r--  mm/memory-failure.c | 27
-rw-r--r--  mm/memory.c | 14
-rw-r--r--  mm/migrate_device.c | 16
-rw-r--r--  mm/page_alloc.c | 65
-rw-r--r--  mm/page_isolation.c | 25
-rw-r--r--  mm/secretmem.c | 2
-rw-r--r--  mm/swap_state.c | 2
-rw-r--r--  mm/util.c | 4
-rw-r--r--  mm/vmscan.c | 4
-rw-r--r--  net/core/net_namespace.c | 7
-rw-r--r--  net/mac80211/mlme.c | 9
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 6
-rw-r--r--  net/mac80211/status.c | 2
-rw-r--r--  net/mac80211/tx.c | 4
-rw-r--r--  net/mac80211/util.c | 4
-rw-r--r--  net/mptcp/protocol.c | 16
-rw-r--r--  net/mptcp/protocol.h | 2
-rw-r--r--  net/mptcp/subflow.c | 33
-rw-r--r--  net/sched/act_ct.c | 5
-rw-r--r--  net/wireless/util.c | 4
-rw-r--r--  sound/hda/intel-dsp-config.c | 10
-rw-r--r--  sound/soc/codecs/nau8824.c | 17
-rw-r--r--  sound/soc/codecs/nau8824.h | 1
-rw-r--r--  sound/soc/codecs/rt5640.c | 64
-rw-r--r--  sound/soc/codecs/rt5640.h | 14
-rw-r--r--  sound/soc/codecs/tas2770.c | 3
-rw-r--r--  sound/soc/fsl/imx-card.c | 4
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 10
-rw-r--r--  tools/include/linux/gfp.h | 21
-rw-r--r--  tools/include/linux/gfp_types.h | 1
-rw-r--r--  tools/perf/tests/mmap-basic.c | 3
-rw-r--r--  tools/perf/tests/perf-record.c | 2
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh | 3
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c | 5
-rw-r--r--  tools/perf/util/Build | 2
-rw-r--r--  tools/perf/util/arm-spe.c | 2
-rw-r--r--  tools/perf/util/bpf_skel/bperf_cgroup.bpf.c | 11
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c | 18
-rw-r--r--  tools/perf/util/parse-events-hybrid.c | 21
-rw-r--r--  tools/perf/util/parse-events.c | 39
-rw-r--r--  tools/perf/util/parse-events.h | 1
-rw-r--r--  tools/perf/util/print-events.c | 39
-rw-r--r--  tools/perf/util/scripting-engines/Build | 2
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 11
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c | 25
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/vmx.h | 1
-rw-r--r--  tools/testing/selftests/kvm/lib/assert.c | 20
-rw-r--r--  tools/testing/selftests/kvm/lib/string_override.c | 39
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20
-rw-r--r--  tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c | 34
-rw-r--r--  tools/testing/selftests/net/reuseport_bpf.c | 2
-rw-r--r--  tools/virtio/linux/virtio.h | 3
-rw-r--r--  tools/virtio/linux/virtio_config.h | 5
201 files changed, 1806 insertions(+), 1056 deletions(-)
diff --git a/.mailmap b/.mailmap
index d175777af078..191778125ef1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -71,6 +71,9 @@ Ben M Cahill <ben.m.cahill@intel.com>
Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
+Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
+Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
+Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index f5ca4aefd184..72b9654f764c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2579,7 +2579,7 @@ W: http://www.armlinux.org.uk/
ARM/QUALCOMM SUPPORT
M: Andy Gross <agross@kernel.org>
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
R: Konrad Dybcio <konrad.dybcio@somainline.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -2670,7 +2670,6 @@ M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://www.armlinux.org.uk/
-F: arch/arm/include/asm/hardware/entry-macro-iomd.S
F: arch/arm/include/asm/hardware/ioc.h
F: arch/arm/include/asm/hardware/iomd.h
F: arch/arm/include/asm/hardware/memc.h
@@ -8944,7 +8943,7 @@ F: include/linux/hw_random.h
HARDWARE SPINLOCK CORE
M: Ohad Ben-Cohen <ohad@wizery.com>
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
R: Baolin Wang <baolin.wang7@gmail.com>
L: linux-remoteproc@vger.kernel.org
S: Maintained
@@ -16126,7 +16125,7 @@ F: drivers/gpio/gpio-sama5d2-piobu.c
F: drivers/pinctrl/pinctrl-at91*
PIN CONTROLLER - QUALCOMM
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/qcom,*.txt
@@ -16819,7 +16818,7 @@ F: Documentation/devicetree/bindings/media/*camss*
F: drivers/media/platform/qcom/camss/
QUALCOMM CLOCK DRIVERS
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
@@ -17309,7 +17308,7 @@ S: Supported
F: fs/reiserfs/
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
@@ -17322,7 +17321,7 @@ F: include/linux/remoteproc.h
F: include/linux/remoteproc/
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
-M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Bjorn Andersson <andersson@kernel.org>
M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
@@ -19961,7 +19960,7 @@ S: Supported
F: drivers/net/team/
F: include/linux/if_team.h
F: include/uapi/linux/if_team.h
-F: tools/testing/selftests/net/team/
+F: tools/testing/selftests/drivers/net/team/
TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
M: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 7da42a5b959c..7e50fe633d8a 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1502,8 +1502,7 @@
mmc1: mmc@0 {
compatible = "ti,am335-sdhci";
ti,needs-special-reset;
- dmas = <&edma_xbar 24 0 0
- &edma_xbar 25 0 0>;
+ dmas = <&edma 24 0>, <&edma 25 0>;
dma-names = "tx", "rx";
interrupts = <64>;
reg = <0x0 0x1000>;
diff --git a/arch/arm/boot/dts/am5748.dtsi b/arch/arm/boot/dts/am5748.dtsi
index c260aa1a85bd..a1f029e9d1f3 100644
--- a/arch/arm/boot/dts/am5748.dtsi
+++ b/arch/arm/boot/dts/am5748.dtsi
@@ -25,6 +25,10 @@
status = "disabled";
};
+&usb4_tm {
+ status = "disabled";
+};
+
&atl_tm {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/integratorap-im-pd1.dts b/arch/arm/boot/dts/integratorap-im-pd1.dts
index 4c22e4436271..cc514cf07bff 100644
--- a/arch/arm/boot/dts/integratorap-im-pd1.dts
+++ b/arch/arm/boot/dts/integratorap-im-pd1.dts
@@ -249,6 +249,7 @@
/* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */
max-memory-bandwidth = <40000000>;
memory-region = <&impd1_ram>;
+ dma-ranges;
port@0 {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 9b652cc27b14..9148287fa0a9 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -160,6 +160,7 @@
pci: pciv3@62000000 {
compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+ device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
@@ -261,7 +262,7 @@
lm0: bus@c0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xc0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xc0000000 0x10000000>;
reg = <0xc0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -269,7 +270,7 @@
lm1: bus@d0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xd0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xd0000000 0x10000000>;
reg = <0xd0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -277,7 +278,7 @@
lm2: bus@e0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xe0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xe0000000 0x10000000>;
reg = <0xe0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -285,7 +286,7 @@
lm3: bus@f0000000 {
compatible = "simple-bus";
ranges = <0x00000000 0xf0000000 0x10000000>;
- dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ dma-ranges = <0x00000000 0xf0000000 0x10000000>;
reg = <0xf0000000 0x10000000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/mach-sunplus/Kconfig b/arch/arm/mach-sunplus/Kconfig
index 926cde5e3cd9..d0c2416e6f24 100644
--- a/arch/arm/mach-sunplus/Kconfig
+++ b/arch/arm/mach-sunplus/Kconfig
@@ -18,8 +18,8 @@ config SOC_SP7021
select ARM_PSCI
select PINCTRL
select PINCTRL_SPPCTL
- select SERIAL_SUNPLUS
- select SERIAL_SUNPLUS_CONSOLE
+ select SERIAL_SUNPLUS if TTY
+ select SERIAL_SUNPLUS_CONSOLE if TTY
help
Support for Sunplus SP7021 SoC. It is based on ARM 4-core
Cortex-A7 with various peripherals (e.g.: I2C, SPI, SDIO,
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 13d7f267b289..dac3b69e314f 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -3374,6 +3374,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc2 SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
+ wakeup-source;
+
usb_1_dwc3: usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
@@ -3384,7 +3386,6 @@
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
- wakeup-source;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index 84dc92dda0b8..4c404e2eafba 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -235,13 +235,13 @@
};
&remoteproc_adsp {
- firmware-name = "qcom/sc8280xp/qcadsp8280.mbn";
+ firmware-name = "qcom/sc8280xp/LENOVO/21BX/qcadsp8280.mbn";
status = "okay";
};
&remoteproc_nsp0 {
- firmware-name = "qcom/sc8280xp/qccdsp8280.mbn";
+ firmware-name = "qcom/sc8280xp/LENOVO/21BX/qccdsp8280.mbn";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 7d509ecd44da..916f12b799b7 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -3394,57 +3394,49 @@
compute-cb@1 {
compatible = "qcom,fastrpc-compute-cb";
reg = <1>;
- iommus = <&apps_smmu 0x1401 0x2040>,
- <&apps_smmu 0x1421 0x0>,
- <&apps_smmu 0x2001 0x420>,
- <&apps_smmu 0x2041 0x0>;
+ iommus = <&apps_smmu 0x1001 0x0460>;
};
compute-cb@2 {
compatible = "qcom,fastrpc-compute-cb";
reg = <2>;
- iommus = <&apps_smmu 0x2 0x3440>,
- <&apps_smmu 0x22 0x3400>;
+ iommus = <&apps_smmu 0x1002 0x0460>;
};
compute-cb@3 {
compatible = "qcom,fastrpc-compute-cb";
reg = <3>;
- iommus = <&apps_smmu 0x3 0x3440>,
- <&apps_smmu 0x1423 0x0>,
- <&apps_smmu 0x2023 0x0>;
+ iommus = <&apps_smmu 0x1003 0x0460>;
};
compute-cb@4 {
compatible = "qcom,fastrpc-compute-cb";
reg = <4>;
- iommus = <&apps_smmu 0x4 0x3440>,
- <&apps_smmu 0x24 0x3400>;
+ iommus = <&apps_smmu 0x1004 0x0460>;
};
compute-cb@5 {
compatible = "qcom,fastrpc-compute-cb";
reg = <5>;
- iommus = <&apps_smmu 0x5 0x3440>,
- <&apps_smmu 0x25 0x3400>;
+ iommus = <&apps_smmu 0x1005 0x0460>;
};
compute-cb@6 {
compatible = "qcom,fastrpc-compute-cb";
reg = <6>;
- iommus = <&apps_smmu 0x6 0x3460>;
+ iommus = <&apps_smmu 0x1006 0x0460>;
};
compute-cb@7 {
compatible = "qcom,fastrpc-compute-cb";
reg = <7>;
- iommus = <&apps_smmu 0x7 0x3460>;
+ iommus = <&apps_smmu 0x1007 0x0460>;
};
compute-cb@8 {
compatible = "qcom,fastrpc-compute-cb";
reg = <8>;
- iommus = <&apps_smmu 0x8 0x3460>;
+ iommus = <&apps_smmu 0x1008 0x0460>;
};
/* note: secure cb9 in downstream */
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index e72a04411888..d9b08dfc2980 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -2128,7 +2128,7 @@
ufs_mem_phy: phy@1d87000 {
compatible = "qcom,sm8350-qmp-ufs-phy";
- reg = <0 0x01d87000 0 0xe10>;
+ reg = <0 0x01d87000 0 0x1c4>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/arch/loongarch/include/asm/loongson.h b/arch/loongarch/include/asm/loongson.h
index 6e8f6972ceb6..00db93edae1b 100644
--- a/arch/loongarch/include/asm/loongson.h
+++ b/arch/loongarch/include/asm/loongson.h
@@ -14,8 +14,6 @@
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
-extern const struct plat_smp_ops loongson3_smp_ops;
-
#define LOONGSON_REG(x) \
(*(volatile u32 *)((char *)TO_UNCACHE(LOONGSON_REG_BASE) + (x)))
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c60eb66793e3..331864369e49 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -14,6 +14,8 @@
__REF
+ .align 12
+
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index aa1c95aaf595..5010e95cef84 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -461,11 +461,9 @@ asmlinkage void noinstr do_watch(struct pt_regs *regs)
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
- int status = -1;
+ int status = SIGILL;
unsigned int opcode = 0;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
- unsigned long old_era = regs->csr_era;
- unsigned long old_ra = regs->regs[1];
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
@@ -477,21 +475,12 @@ asmlinkage void noinstr do_ri(struct pt_regs *regs)
die_if_kernel("Reserved instruction in kernel code", regs);
- compute_return_era(regs);
-
if (unlikely(get_user(opcode, era) < 0)) {
status = SIGSEGV;
current->thread.error_code = 1;
}
- if (status < 0)
- status = SIGILL;
-
- if (unlikely(status > 0)) {
- regs->csr_era = old_era; /* Undo skip-over. */
- regs->regs[1] = old_ra;
- force_sig(status);
- }
+ force_sig(status);
out:
local_irq_disable();
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 698274109c91..e712f80fe189 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -937,15 +937,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
- /*
- * pmdp collapse_flush need to ensure that there are no parallel gup
- * walk after this call. This is needed so that we can have stable
- * page ref count when collapsing a page. We don't allow a collapse page
- * if we have gup taken on the page. We can ensure that by sending IPI
- * because gup walk happens with IRQ disabled.
- */
- serialize_against_pte_lookup(vma->vm_mm);
-
radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
return pmd;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c601939a74b1..c20d8cd47c48 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2102,6 +2102,15 @@ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
+EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
+
+static struct attribute *grt_mem_attrs[] = {
+ EVENT_PTR(mem_ld_grt),
+ EVENT_PTR(mem_st_grt),
+ NULL
+};
+
static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
@@ -5975,6 +5984,36 @@ __init int intel_pmu_init(void)
name = "Tremont";
break;
+ case INTEL_FAM6_ALDERLAKE_N:
+ x86_pmu.mid_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_grt_extra_regs;
+
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.pebs_block = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+
+ intel_pmu_pebs_data_source_grt();
+ x86_pmu.pebs_latency_data = adl_latency_data_small;
+ x86_pmu.get_event_constraints = tnt_get_event_constraints;
+ x86_pmu.limit_period = spr_limit_period;
+ td_attr = tnt_events_attrs;
+ mem_attr = grt_mem_attrs;
+ extra_attr = nhm_format_attr;
+ pr_cont("Gracemont events, ");
+ name = "gracemont";
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
@@ -6317,7 +6356,6 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ALDERLAKE_N:
case INTEL_FAM6_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P:
/*
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index de1f55d51784..ac973c6f82ad 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -110,13 +110,18 @@ void __init intel_pmu_pebs_data_source_skl(bool pmem)
__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
}
-static void __init intel_pmu_pebs_data_source_grt(u64 *data_source)
+static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
{
data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
}
+void __init intel_pmu_pebs_data_source_grt(void)
+{
+ __intel_pmu_pebs_data_source_grt(pebs_data_source);
+}
+
void __init intel_pmu_pebs_data_source_adl(void)
{
u64 *data_source;
@@ -127,7 +132,7 @@ void __init intel_pmu_pebs_data_source_adl(void)
data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
- intel_pmu_pebs_data_source_grt(data_source);
+ __intel_pmu_pebs_data_source_grt(data_source);
}
static u64 precise_store_data(u64 status)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba3d24a6a4ec..266143abcbd8 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1516,6 +1516,8 @@ void intel_pmu_pebs_data_source_skl(bool pmem);
void intel_pmu_pebs_data_source_adl(void);
+void intel_pmu_pebs_data_source_grt(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index aeb38023a703..5d75fe229342 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -115,6 +115,9 @@
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
+#define INTEL_FAM6_METEORLAKE 0xAC
+#define INTEL_FAM6_METEORLAKE_L 0xAA
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 81a0211a372d..a73bced40e24 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,16 +21,6 @@ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-static inline struct cpumask *cpu_llc_shared_mask(int cpu)
-{
- return per_cpu(cpu_llc_shared_map, cpu);
-}
-
-static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
-{
- return per_cpu(cpu_l2c_shared_map, cpu);
-}
-
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
@@ -172,6 +162,16 @@ extern int safe_smp_processor_id(void);
# define safe_smp_processor_id() smp_processor_id()
#endif
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return per_cpu(cpu_llc_shared_map, cpu);
+}
+
+static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
+{
+ return per_cpu(cpu_l2c_shared_map, cpu);
+}
+
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu) wbinvd()
static inline int wbinvd_on_all_cpus(void)
@@ -179,6 +179,11 @@ static inline int wbinvd_on_all_cpus(void)
wbinvd();
return 0;
}
+
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return (struct cpumask *)cpumask_of(0);
+}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus;
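
The smp.h hunk above moves the cache-sharing helpers below the extern declarations and gives !CONFIG_SMP builds a CPU-0 stub for cpu_llc_shared_mask(), so callers need no #ifdef. A minimal sketch of that idiom, with hypothetical names:

#ifdef CONFIG_SMP
static inline struct cpumask *foo_shared_mask(int cpu)
{
        return per_cpu(foo_shared_map, cpu);    /* real per-CPU map */
}
#else
static inline struct cpumask *foo_shared_mask(int cpu)
{
        /* uniprocessor: CPU 0 is the only CPU, its mask answers any query */
        return (struct cpumask *)cpumask_of(0);
}
#endif
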
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 62f6b8b7c4a5..4f3204364caa 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1319,22 +1319,23 @@ struct bp_patching_desc {
atomic_t refs;
};
-static struct bp_patching_desc *bp_desc;
+static struct bp_patching_desc bp_desc;
static __always_inline
-struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+struct bp_patching_desc *try_get_desc(void)
{
- /* rcu_dereference */
- struct bp_patching_desc *desc = __READ_ONCE(*descp);
+ struct bp_patching_desc *desc = &bp_desc;
- if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
+ if (!arch_atomic_inc_not_zero(&desc->refs))
return NULL;
return desc;
}
-static __always_inline void put_desc(struct bp_patching_desc *desc)
+static __always_inline void put_desc(void)
{
+ struct bp_patching_desc *desc = &bp_desc;
+
smp_mb__before_atomic();
arch_atomic_dec(&desc->refs);
}
@@ -1367,15 +1368,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
/*
* Having observed our INT3 instruction, we now must observe
- * bp_desc:
+ * bp_desc with non-zero refcount:
*
- * bp_desc = desc INT3
+ * bp_desc.refs = 1 INT3
* WMB RMB
- * write INT3 if (desc)
+ * write INT3 if (bp_desc.refs != 0)
*/
smp_rmb();
- desc = try_get_desc(&bp_desc);
+ desc = try_get_desc();
if (!desc)
return 0;
@@ -1429,7 +1430,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
ret = 1;
out_put:
- put_desc(desc);
+ put_desc();
return ret;
}
@@ -1460,18 +1461,20 @@ static int tp_vec_nr;
*/
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
- struct bp_patching_desc desc = {
- .vec = tp,
- .nr_entries = nr_entries,
- .refs = ATOMIC_INIT(1),
- };
unsigned char int3 = INT3_INSN_OPCODE;
unsigned int i;
int do_sync;
lockdep_assert_held(&text_mutex);
- smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
+ bp_desc.vec = tp;
+ bp_desc.nr_entries = nr_entries;
+
+ /*
+ * Corresponds to the implicit memory barrier in try_get_desc() to
+ * ensure reading a non-zero refcount provides up to date bp_desc data.
+ */
+ atomic_set_release(&bp_desc.refs, 1);
/*
* Corresponding read barrier in int3 notifier for making sure the
@@ -1559,12 +1562,10 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
text_poke_sync();
/*
- * Remove and synchronize_rcu(), except we have a very primitive
- * refcount based completion.
+ * Remove and wait for refs to be zero.
*/
- WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
- if (!atomic_dec_and_test(&desc.refs))
- atomic_cond_read_acquire(&desc.refs, !VAL);
+ if (!atomic_dec_and_test(&bp_desc.refs))
+ atomic_cond_read_acquire(&bp_desc.refs, !VAL);
}
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
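
The bp_desc rework above swaps an RCU-style published pointer for a permanent descriptor whose refcount doubles as the publish flag. A condensed sketch of the resulting lifecycle, using the same atomics as the hunk but hypothetical names:

static struct { void *payload; atomic_t refs; } desc;

static bool reader(void)                        /* INT3-handler side */
{
        if (!atomic_inc_not_zero(&desc.refs))   /* pairs with release below */
                return false;                   /* nothing published */
        /* ... read desc.payload safely ... */
        smp_mb__before_atomic();
        atomic_dec(&desc.refs);
        return true;
}

static void writer(void *payload)               /* text-poking side */
{
        desc.payload = payload;
        atomic_set_release(&desc.refs, 1);      /* publish: refs != 0 */
        /* ... install INT3, patch text, sync cores ... */
        if (!atomic_dec_and_test(&desc.refs))   /* unpublish ... */
                atomic_cond_read_acquire(&desc.refs, !VAL); /* ... and wait */
}
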
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 24c1bb8eb196..8bdeae2fc309 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -344,8 +344,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
}
va_page = sgx_encl_grow(encl, false);
- if (IS_ERR(va_page))
+ if (IS_ERR(va_page)) {
+ if (PTR_ERR(va_page) == -EBUSY)
+ vmret = VM_FAULT_NOPAGE;
goto err_out_epc;
+ }
if (va_page)
list_add(&va_page->list, &encl->va_pages);
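
The encl.c fix above separates a transient allocation failure from a fatal one: -EBUSY is translated into VM_FAULT_NOPAGE so the fault is simply retried rather than escalated. The shape of the pattern, with a hypothetical helper name:

page = grow_enclave(encl);                      /* may return ERR_PTR(-EBUSY) */
if (IS_ERR(page)) {
        if (PTR_ERR(page) == -EBUSY)
                vmret = VM_FAULT_NOPAGE;        /* transient: re-fault later */
        goto err_out;                           /* anything else stays an error */
}
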
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 515e2a5f25bb..0aad028f04d4 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -49,9 +49,13 @@ static LIST_HEAD(sgx_dirty_page_list);
* Reset post-kexec EPC pages to the uninitialized state. The pages are removed
* from the input list, and made available for the page allocator. SECS pages
* prepending their children in the input list are left intact.
+ *
+ * Return 0 when sanitization was successful or kthread was stopped, and the
+ * number of unsanitized pages otherwise.
*/
-static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
+static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
+ unsigned long left_dirty = 0;
struct sgx_epc_page *page;
LIST_HEAD(dirty);
int ret;
@@ -59,7 +63,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
/* dirty_page_list is thread-local, no need for a lock: */
while (!list_empty(dirty_page_list)) {
if (kthread_should_stop())
- return;
+ return 0;
page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
@@ -92,12 +96,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
} else {
/* The page is not yet clean - move to the dirty list. */
list_move_tail(&page->list, &dirty);
+ left_dirty++;
}
cond_resched();
}
list_splice(&dirty, dirty_page_list);
+ return left_dirty;
}
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -395,10 +401,7 @@ static int ksgxd(void *p)
* required for SECS pages, whose child pages blocked EREMOVE.
*/
__sgx_sanitize_pages(&sgx_dirty_page_list);
- __sgx_sanitize_pages(&sgx_dirty_page_list);
-
- /* sanity check: */
- WARN_ON(!list_empty(&sgx_dirty_page_list));
+ WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
while (!kthread_should_stop()) {
if (try_to_freeze())
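
With __sgx_sanitize_pages() now reporting how many pages it could not clean, the two-pass scheme above becomes self-checking: pass one removes child pages, pass two the SECS parents they had blocked, and anything still dirty after pass two is a genuine bug. Roughly, with a hypothetical name:

sanitize(&dirty_list);          /* pass 1: children; SECS parents may remain */
WARN_ON(sanitize(&dirty_list)); /* pass 2: parents; must leave zero behind */
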
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 4c1c2c06e96b..2796dde06302 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -902,8 +902,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
}
break;
- case 9:
- break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ad0139d25401..f1bb18617156 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,7 +44,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
* called from other contexts.
*/
pagefault_disable();
- ret = __copy_from_user_inatomic(to, from, n);
+ ret = raw_copy_from_user(to, from, n);
pagefault_enable();
return ret;
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index f8220fd2c169..829c1409ffbd 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -4,10 +4,12 @@ KCOV_INSTRUMENT_tlb.o := n
KCOV_INSTRUMENT_mem_encrypt.o := n
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
KCOV_INSTRUMENT_mem_encrypt_identity.o := n
+KCOV_INSTRUMENT_pgprot.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_amd.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+KASAN_SANITIZE_pgprot.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
# reference __initdata sections.
@@ -17,6 +19,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
+CFLAGS_REMOVE_pgprot.o = -pg
endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 16a1663d02d4..9f40917c49ef 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
+ /*
+ * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+ * not this code. Assume that any Intel systems using this
+ * are ancient and may need the dummy wait. This also assumes
+ * that the motivating chipset issue was Intel-only.
+ */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
#endif
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
+ /*
+ * Dummy wait op - must do something useless after P_LVL2 read
+ * because chipsets cannot guarantee that STPCLK# signal gets
+ * asserted in time to freeze execution properly
+ *
+ * This workaround has been in place since the original ACPI
+ * implementation was merged, circa 2002.
+ *
+ * If a profile is pointing to this instruction, please first
+ * consider moving your system to a more modern idle
+ * mechanism.
+ */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 826d41f341e4..c9a9aa607b62 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3988,6 +3988,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 7a5fe41aa5ae..13b9d0fdd42c 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1018,26 +1018,25 @@ DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
- * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- * @ap: ATA port to which the device change the queue depth
+ * ata_change_queue_depth - Set a device maximum queue depth
+ * @ap: ATA port of the target device
+ * @dev: target ATA device
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
- * libsas and libata have different approaches for associating a sdev to
- * its ata_port.
+ * Helper to set a device maximum queue depth, usable with both libsas
+ * and libata.
*
*/
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth)
+int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth)
{
- struct ata_device *dev;
unsigned long flags;
- if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev || !ata_dev_enabled(dev))
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
/* NCQ enabled? */
@@ -1059,7 +1058,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
return scsi_change_queue_depth(sdev, queue_depth);
}
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+EXPORT_SYMBOL_GPL(ata_change_queue_depth);
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
@@ -1080,7 +1079,8 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- return __ata_change_queue_depth(ap, sdev, queue_depth);
+ return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
+ sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 29e2f55c6faa..ff9602a0e54e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,6 +1055,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
+ int depth = 1;
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
@@ -1100,13 +1101,10 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
- if (dev->flags & ATA_DFLAG_NCQ) {
- int depth;
-
+ if (dev->flags & ATA_DFLAG_NCQ)
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
- depth = min(ATA_MAX_QUEUE, depth);
- scsi_change_queue_depth(sdev, depth);
- }
+ depth = min(ATA_MAX_QUEUE, depth);
+ scsi_change_queue_depth(sdev, depth);
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30255fcaf181..dd9a05174726 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -322,14 +322,14 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
if (unlikely(vbr->sg_table.nents < 0)) {
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ blk_mq_start_request(req);
+
return BLK_STS_OK;
}
@@ -391,8 +391,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
}
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist,
- struct request **requeue_list)
+ struct request **rqlist)
{
unsigned long flags;
int err;
@@ -408,7 +407,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
if (err) {
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
- rq_list_add(requeue_list, req);
+ blk_mq_requeue_request(req, true);
}
}
@@ -436,7 +435,7 @@ static void virtio_queue_rqs(struct request **rqlist)
if (!next || req->mq_hctx != next->mq_hctx) {
req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ kick = virtblk_add_req_batch(vq, rqlist);
if (kick)
virtqueue_notify(vq->vq);
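
The virtio_blk reorder above enforces the blk-mq rule that blk_mq_start_request() must come after the last step that may still fail softly, so a BLK_STS_RESOURCE return leaves the request cleanly requeueable. In outline, with map_data() as a hypothetical stand-in:

if (map_data(req) < 0) {                /* still allowed to fail */
        cleanup_cmd(req);
        return BLK_STS_RESOURCE;        /* request was never started */
}
blk_mq_start_request(req);              /* point of no return */
return BLK_STS_OK;
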
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 1a098db12062..680f9d8d357c 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -726,6 +726,7 @@ void iproc_pll_clk_setup(struct device_node *node,
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -773,7 +774,12 @@ void iproc_pll_clk_setup(struct device_node *node,
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -793,13 +799,11 @@ void iproc_pll_clk_setup(struct device_node *node,
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fc1bd23d4583..598f3cf4eba4 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index f5c9fa40491c..dcc41d178238 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -332,7 +332,7 @@ static struct platform_driver imx93_clk_driver = {
.driver = {
.name = "imx93-ccm",
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx93_clk_of_match),
+ .of_match_table = imx93_clk_of_match,
},
};
module_platform_driver(imx93_clk_driver);
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 201bf6e6b6e0..d5544cbc5c48 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -101,15 +101,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw)
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -120,8 +116,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw)
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 070c3b896559..b6b89413e090 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -239,6 +239,11 @@ static const struct clk_ops mpfs_clk_cfg_ops = {
.hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
+#define CLK_CPU_OFFSET 0u
+#define CLK_AXI_OFFSET 1u
+#define CLK_AHB_OFFSET 2u
+#define CLK_RTCREF_OFFSET 3u
+
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
REG_CLOCK_CONFIG_CR),
@@ -362,7 +367,7 @@ static const struct clk_ops mpfs_periph_clk_ops = {
_flags), \
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw)
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/*
* Critical clocks:
@@ -370,6 +375,8 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
+ * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop
+ * if the AHB interface clock is disabled
* - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect)
* clock domain crossers which provide the interface to the FPGA fabric. Disabling them
* causes the FPGA fabric to go into reset.
@@ -394,7 +401,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0),
CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0),
CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0),
- CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0),
+ CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0),
CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0),
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
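
The CLK_*_OFFSET constants above pin PARENT_CLK() to positions within mpfs_cfg_clks[] rather than to the global CLK_* binding IDs, which need not match the array's order. The token pasting expands as follows:

#define CLK_AHB_OFFSET 2u
#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/* PARENT_CLK(AHB) -> &mpfs_cfg_clks[CLK_AHB_OFFSET].hw, i.e. element 2 */
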
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 30056da3e0af..42568c616181 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1191,9 +1191,13 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force PLL_GPU output divider bits to 0 */
+ /*
+ * Force PLL_GPU output divider bits to 0 and adjust
+ * multiplier to sensible default value of 432 MHz.
+ */
val = readl(reg + SUN50I_H6_PLL_GPU_REG);
- val &= ~BIT(0);
+ val &= ~(GENMASK(15, 8) | BIT(0));
+ val |= 17 << 8;
writel(val, reg + SUN50I_H6_PLL_GPU_REG);
/* Force GPU_CLK divider bits to 0 */
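
The arithmetic behind the new PLL_GPU default above, assuming the usual H6 PLL formula of rate = 24 MHz * (N + 1) with the multiplier N in bits [15:8]:

val &= ~(GENMASK(15, 8) | BIT(0));  /* clear the old N and the output divider */
val |= 17 << 8;                     /* N = 17: 24 MHz * (17 + 1) = 432 MHz */
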
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2a60d0525cde..168195672e2e 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
+ kfree(vc_akcipher_req->src_buf);
+ kfree(vc_akcipher_req->dst_buf);
+ vc_akcipher_req->src_buf = NULL;
+ vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 4e27c3d66a83..0e05a79de82d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
@@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
return scmi_pd_power(domain, false);
}
-static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- int ret;
-
- ret = pm_clk_create(dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clks(dev);
- if (ret >= 0)
- return 0;
-
- pm_clk_destroy(dev);
- return ret;
-}
-
-static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- pm_clk_destroy(dev);
-}
-
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
@@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
- scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
- scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK |
- GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index aa126ab80f0c..1bb317b8dcce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -790,8 +790,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 offset;
u32 set;
- if (of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio")) {
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
/*
* There are only two sets of PWM configuration registers for
* all the GPIO lines on those SoCs which this driver reserves
@@ -801,13 +805,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
return 0;
offset = 0;
- } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
- if (ret < 0)
- return 0;
- } else {
- return 0;
}
if (IS_ERR(mvchip->clk))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 130060834b4e..48bd660ddb85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1050,6 +1050,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index be7aff2d4a57..25e1f5ed7ead 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3152,7 +3152,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -4064,12 +4065,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4093,6 +4102,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4111,6 +4123,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4125,6 +4143,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* no matter what r is, always need to properly release full GPU */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 23a696d38390..de5b936b016d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
uint32_t rlc_srlg_feature_version;
uint32_t rlc_srls_fw_version;
uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..e23f6192c50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,267 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "failed to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
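
The dispatch above is cumulative: the v2.0 parser always runs, each later minor version layers more state on top, and v2.3/v2.4 are mutually exclusive add-ons. A minimal stand-alone sketch of that control flow, with hypothetical parse_v2_x() helpers standing in for the real parsers:

#include <stdio.h>

static int parse_v2_0(void) { puts("parse v2.0 base header"); return 0; }
static void parse_v2_1(void) { puts("parse v2.1 save/restore lists"); }
static void parse_v2_2(void) { puts("parse v2.2 iram/dram ucode"); }
static void parse_v2_3(void) { puts("parse v2.3 rlcp/rlcv ucode"); }
static void parse_v2_4(void) { puts("parse v2.4 tap delays"); }

static int init_microcode(unsigned int major, unsigned int minor)
{
	int err;

	if (major < 2)
		return -1;		/* only v2.x headers are supported */

	err = parse_v2_0();		/* base fields are always present */
	if (err)
		return err;
	if (minor >= 1)
		parse_v2_1();
	if (minor >= 2)
		parse_v2_2();
	if (minor == 3)			/* v2.3 and v2.4 are exclusive add-ons */
		parse_v2_3();
	if (minor == 4)
		parse_v2_4();
	return 0;
}

int main(void)
{
	return init_microcode(2, 3);
}
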
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 96b6cf4c4d54..59edf32f775e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -260,8 +260,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f6b1bb40e503..daf8ba8235cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -474,49 +474,6 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -527,8 +484,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -583,58 +538,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -769,60 +680,6 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
}
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
}
out:
@@ -5260,6 +5117,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5273,6 +5132,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc9c1043244c..037af8352677 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5597,7 +5597,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
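
The one-line change above fixes a wrap-around bug: when wptr sits exactly on a ring-buffer boundary, masking before the decrement underflows instead of yielding the last slot. A runnable illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t buf_mask = 0xff;		/* 256-entry ring */
	uint64_t wptr = 0x100;			/* write pointer exactly at a wrap */

	uint64_t old_cur = (wptr & buf_mask) - 1;	/* 0 - 1 underflows */
	uint64_t new_cur = (wptr - 1) & buf_mask;	/* 0xff, the last slot */

	printf("old: %#llx, new: %#llx\n",
	       (unsigned long long)old_cur, (unsigned long long)new_cur);
	return 0;
}
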
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index cc3fdbbcd314..f92744b8d79d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -185,6 +185,10 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
mes_add_queue_pkt.trap_en = 1;
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 2e50db3b761e..6e564b549b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -625,6 +625,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..007a3db69df1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index b8e14c2cc295..3ae350220d42 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+ * Only enable static-screen optimizations for PSR1. For PSR SU, these
+ * optimizations cause issues with the vstartup interrupt, which
+ * amdgpu_dm uses to send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 8559dcd80af0..4a15aa7a375f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -130,11 +130,20 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
- } else
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..f0f3f66629cc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_levels);
+ num_dcfclk_levels = num_levels;
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
@@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ num_dtbclk_levels = num_levels;
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_levels);
+ num_dispclk_levels = num_levels;
+
+ if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index aea49334021c..38a67051d470 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2164,8 +2164,7 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
- if (pipe_ctx->stream_res.audio != NULL &&
- pipe_ctx->stream_res.audio->enabled == false) {
+ if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2205,8 +2204,7 @@ static void dce110_setup_audio_dto(
if (!dc_is_dp_signal(pipe_ctx->stream->signal))
continue;
- if (pipe_ctx->stream_res.audio != NULL &&
- pipe_ctx->stream_res.audio->enabled == false) {
+ if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 232cc15979dd..fb729674953b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 06d8638db696..8c0ab013764e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -261,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -316,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during a mode transition, and sometimes doesn't recover.
*/
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 0d5e8a441512..6640d0ac4304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ // Don't program 0xF into the register field. Not valid since
+ // the K1 / K2 fields are only 1 / 2 bits wide.
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
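
Both the dccg314 and dccg32 changes follow the same pattern: read back the currently programmed divider, refuse the 0xF not-applicable sentinel (it does not fit the 1- and 2-bit fields), and skip writes that would not change anything. A stand-alone sketch, with a plain variable standing in for the REG_GET_2/REG_UPDATE_2 accessors:

#include <stdio.h>

#define PIXEL_RATE_DIV_NA 0xF

static unsigned int fake_reg = (1u << 4) | 2u;	/* k1 in bit [4], k2 in bits [1:0] */

static void get_div(unsigned int *k1, unsigned int *k2)
{
	*k1 = (fake_reg >> 4) & 0x1;
	*k2 = fake_reg & 0x3;
}

static void set_div(unsigned int k1, unsigned int k2)
{
	unsigned int cur_k1, cur_k2;

	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
		return;			/* 0xF does not fit the 1/2-bit fields */

	get_div(&cur_k1, &cur_k2);
	if (k1 == cur_k1 && k2 == cur_k2)
		return;			/* avoid a redundant register write */

	fake_reg = (k1 << 4) | k2;
	printf("programmed k1=%u k2=%u\n", k1, k2);
}

int main(void)
{
	set_div(1, 2);	/* same as current -> skipped */
	set_div(0, 1);	/* differs -> programmed */
	return 0;
}
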
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 99eb239bbc7b..9aebc1be2f59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+ /* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f43686997917..e573e706430d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -1926,6 +1926,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+ /* Scan through the clock values we currently have and, if any are 0,
+ * populate them with the corresponding dcn3_2_soc.clock_limits[] values.
+ *
+ * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
+ * 0 will cause the clock table build to be skipped.
+ */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 6ce221098979..e1b79e2aab8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -77,4 +77,6 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
int pipe_cnt,
int vlevel);
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 50bfa513cb35..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE {
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
uint32_t trap_en : 1;
- uint32_t reserved : 21;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 096327513dd0..1d454485e0d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ /* PMFW 78.58 contains a critical fix for the gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
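
The rewrite above flips the allowed-feature mask from opt-in (start at zero, OR in supported bits) to opt-out (start at all-ones, AND out unsupported bits), so newly added firmware features default to enabled. The same shape in a compact runnable form, with invented bit names:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_MASK(bit)	(1ULL << (bit))
#define FEATURE_DPM_GFXCLK	0
#define FEATURE_GFXOFF		1

int main(void)
{
	uint64_t mask;
	int sclk_dpm_enabled = 0;	/* pretend the module param is off */

	mask = ~0ULL;			/* opt-out: everything allowed by default */
	if (!sclk_dpm_enabled)
		mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK);

	printf("gfxclk dpm: %s, gfxoff: %s\n",
	       mask & FEATURE_MASK(FEATURE_DPM_GFXCLK) ? "on" : "off",
	       mask & FEATURE_MASK(FEATURE_GFXOFF) ? "on" : "off");
	return 0;
}
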
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 28bad30dc4e5..5968f4af190b 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -188,7 +188,7 @@ static int lt8912_write_lvds_config(struct lt8912 *lt)
{0x03, 0xff},
};
- return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
+ return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
};
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
@@ -268,7 +268,7 @@ static int lt8912_video_setup(struct lt8912 *lt)
u32 hactive, h_total, hpw, hfp, hbp;
u32 vactive, v_total, vpw, vfp, vbp;
u8 settle = 0x08;
- int ret;
+ int ret, hsync_activehigh, vsync_activehigh;
if (!lt)
return -EINVAL;
@@ -278,12 +278,14 @@ static int lt8912_video_setup(struct lt8912 *lt)
hpw = lt->mode.hsync_len;
hbp = lt->mode.hback_porch;
h_total = hactive + hfp + hpw + hbp;
+ hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH;
vactive = lt->mode.vactive;
vfp = lt->mode.vfront_porch;
vpw = lt->mode.vsync_len;
vbp = lt->mode.vback_porch;
v_total = vactive + vfp + vpw + vbp;
+ vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH;
if (vactive <= 600)
settle = 0x04;
@@ -317,6 +319,13 @@ static int lt8912_video_setup(struct lt8912 *lt)
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0),
+ vsync_activehigh ? BIT(0) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1),
+ hsync_activehigh ? BIT(1) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0),
+ lt->connector.display_info.is_hdmi ? BIT(0) : 0);
+
return ret;
}
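
regmap_update_bits() performs a read-modify-write of only the masked bits, which is why the hsync and vsync polarity bits can be programmed independently into register 0xab. A userspace model of that masked-update semantic (update_bits() here is a local stand-in, not the kernel API):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Stand-in for regmap_update_bits(): read-modify-write of masked bits only. */
static void update_bits(uint8_t *reg, uint8_t mask, uint8_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t reg_ab = 0;
	int hsync_activehigh = 1, vsync_activehigh = 0;

	update_bits(&reg_ab, BIT(0), vsync_activehigh ? BIT(0) : 0);
	update_bits(&reg_ab, BIT(1), hsync_activehigh ? BIT(1) : 0);
	printf("0xab = %#x\n", reg_ab);	/* 0x2: hsync high, vsync low */
	return 0;
}
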
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b..6b5d4ea22b67 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout maybe chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen within before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdf..c718e6dc40b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If, after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
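
The decision the hunk implements reduces to: escalate to an engine reset only if the engine is still on the request the preemption originally targeted, otherwise give the new context another timeout. A sketch of that skeleton with invented types:

#include <stdio.h>

struct request { int id; };

enum action { ESCALATE_RESET, REARM_TIMER };

static enum action on_preempt_timeout(const struct request *active,
				      const struct request *preempt_target)
{
	/* Only punish the context we actually tried to preempt. */
	return active == preempt_target ? ESCALATE_RESET : REARM_TIMER;
}

int main(void)
{
	struct request a = { 1 }, b = { 2 };

	printf("same target -> reset: %d\n",
	       on_preempt_timeout(&a, &a) == ESCALATE_RESET);
	printf("new context -> re-arm: %d\n",
	       on_preempt_timeout(&a, &b) == REARM_TIMER);
	return 0;
}
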
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 73a8b46e0234..d09a0e845d09 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
-static const struct attribute *freq_attrs[] = {
- &dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
@@ -763,12 +762,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (!is_object_gt(kobj))
return;
- ret = sysfs_create_files(kobj, freq_attrs);
+ ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
if (ret)
drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
gt->info.id, ERR_PTR(ret));
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 93446b21f98f..db793a550c25 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -77,6 +77,7 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
+ fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -90,6 +91,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
+
+ fwnode_handle_put(child);
}
return 0;
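
The two added fwnode_handle_put() calls plug a reference leak: the child-node iterator hands out a counted reference, so every exit path, early returns included, must drop it. A runnable analogy with an explicit counter (all names below are invented):

#include <stdio.h>

static int refs;

static void get_node(void) { refs++; }
static void put_node(void) { refs--; }

static int parse(int fail_early)
{
	get_node();			/* iterator takes a reference */
	if (fail_early) {
		put_node();		/* the fix: drop it on the error path */
		return -1;
	}
	put_node();			/* and on the normal path */
	return 0;
}

int main(void)
{
	parse(1);
	parse(0);
	printf("leaked refs: %d\n", refs);	/* 0 once both paths drop the ref */
	return 0;
}
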
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 65286762b02a..ad8660be0127 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -20,7 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_HPVIDR1_REG 0xBF8
#define SNVS_LPSR_REG 0x4C /* LP Status Register */
#define SNVS_LPCR_REG 0x38 /* LP Control Register */
#define SNVS_HPSR_REG 0x14
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 434d48ae4b12..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -186,7 +186,6 @@ static const char * const smbus_pnp_ids[] = {
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
- "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
"LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 2745bf1aee38..83f4be05e27b 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
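
The fix is a sign error: PTR_ERR() returns a negative errno, so comparing against the positive EPROBE_DEFER constant never matched and every deferred probe logged a spurious error. A runnable demonstration (EPROBE_DEFER is kernel-internal, so it is defined locally here):

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, defined here for the demo */

int main(void)
{
	int error = -EPROBE_DEFER;	/* what PTR_ERR() hands back */

	if (error != EPROBE_DEFER)	/* old check: always true */
		printf("old check: spurious error message on deferral\n");
	if (error != -EPROBE_DEFER)	/* fixed check: correctly silent */
		printf("new check: error message\n");
	return 0;
}
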
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9fa408bf2..eb5ea5b69cfa 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -561,6 +561,11 @@ config IRQ_LOONGARCH_CPU
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+ select LOONGSON_PCH_PIC
+ select LOONGSON_PCH_MSI
+ select LOONGSON_PCH_LPC
help
Support for the LoongArch CPU Interrupt Controller. For details of
irq chip hierarchy on LoongArch platforms please read the document
@@ -623,8 +628,9 @@ config LOONGSON_PCH_MSI
config LOONGSON_PCH_LPC
bool "Loongson PCH LPC Controller"
+ depends on LOONGARCH
depends on MACH_LOONGSON64
- default (MACH_LOONGSON64 && LOONGARCH)
+ default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
Support for the Loongson PCH LPC Controller.
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5ff09de6c48f..beead1a0191c 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1574,13 +1574,15 @@ static int its_select_cpu(struct irq_data *d,
const struct cpumask *aff_mask)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- cpumask_var_t tmpmask;
+ static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+ static struct cpumask __tmpmask;
+ struct cpumask *tmpmask;
+ unsigned long flags;
int cpu, node;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return -ENOMEM;
-
node = its_dev->its->numa_node;
+ tmpmask = &__tmpmask;
+
+ raw_spin_lock_irqsave(&tmpmask_lock, flags);
if (!irqd_affinity_is_managed(d)) {
/* First try the NUMA node */
@@ -1634,7 +1636,7 @@ static int its_select_cpu(struct irq_data *d,
cpu = cpumask_pick_least_loaded(d, tmpmask);
}
out:
- free_cpumask_var(tmpmask);
+ raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
return cpu;
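The its_select_cpu() rework above removes a per-call cpumask allocation from a hard atomic path: with CONFIG_CPUMASK_OFFSTACK, alloc_cpumask_var(GFP_ATOMIC) can fail (and is problematic under raw locks, notably on PREEMPT_RT), so the function now serializes all callers around one static scratch mask instead. A minimal sketch of the pattern, using a made-up pick_cpu() helper:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static int pick_cpu(const struct cpumask *aff)
{
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask scratch;	/* shared; only valid under the lock */
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&mask_lock, flags);
	cpumask_and(&scratch, aff, cpu_online_mask);
	cpu = cpumask_first(&scratch);
	raw_spin_unlock_irqrestore(&mask_lock, flags);

	return cpu < nr_cpu_ids ? cpu : -ENODEV;
}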
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a73763d475f0..6a3f7498ea8e 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,7 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
- if (!host_data->drv_data || !host_data->drv_data->desc_irqs)
+ if (!host_data->drv_data->desc_irqs)
return -EINVAL;
desc_irq = host_data->drv_data->desc_irqs[hwirq];
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index a1bd6d9c9223..909df82fed33 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -354,6 +354,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -378,8 +384,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 95e8c29ccc65..d2f5f30582a9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -228,7 +228,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int ret;
@@ -272,14 +271,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ if (dev->enc_irq < 0) {
+ ret = dev->enc_irq;
goto err_res;
}
- dev->enc_irq = platform_get_irq(pdev, 0);
irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
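The probe cleanup above leans on platform_get_irq() doing the whole job: it returns either the mapped IRQ number or a negative errno, so the separate platform_get_resource(IORESOURCE_IRQ, ...) lookup was redundant (and does not work on device-tree systems, where IRQ resources are mapped lazily). A minimal sketch of the resulting shape; the handler name is hypothetical:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}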
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9c05776f11d1..d509a4a2f08e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2740,7 +2740,7 @@ static const struct usb_device_id uvc_ids[] = {
.idProduct = 0x4034,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 0,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 0f3d6b5667b0..55c26e7d370e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1040,6 +1040,8 @@ int v4l2_compat_get_array_args(struct file *file, void *mbuf,
{
int err = 0;
+ memset(mbuf, 0, array_size);
+
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c314025d977e..e6fd355a2e92 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2872,9 +2872,9 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
- IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
@@ -3367,8 +3367,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (array_buf == NULL)
- goto out_array_args;
- err = -EFAULT;
+ goto out;
if (in_compat_syscall())
err = v4l2_compat_get_array_args(file, array_buf,
user_ptr, array_size,
@@ -3377,7 +3376,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
- goto out_array_args;
+ goto out;
*kernel_ptr = array_buf;
}
@@ -3395,6 +3394,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ */
+ if (err < 0 && !always_copy)
+ goto out;
+
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
if (in_compat_syscall()) {
@@ -3409,16 +3415,8 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
} else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
- goto out_array_args;
}
- /*
- * Some ioctls can return an error, but still have valid
- * results that must be returned.
- */
- if (err < 0 && !always_copy)
- goto out;
-out_array_args:
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
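The reshuffle above changes what reaches user space on failure: the early bail-out now sits in front of the array copy-back as well, so an erroring ioctl writes nothing back unless it is marked INFO_FL_ALWAYS_COPY, and the three ext-controls ioctls gain that flag because they must report the failing control index even on error. A toy model of the fixed ordering:

#include <stdio.h>
#include <string.h>

/* stand-in for the copy-back stage after the ioctl handler ran */
static int copy_back(int err, int always_copy, int has_array,
		     char *user_buf, const char *kernel_buf, size_t n)
{
	/* on error, copy back only when the ioctl demands it */
	if (err < 0 && !always_copy)
		return err;

	if (has_array)
		memcpy(user_buf, kernel_buf, n);	/* models copy_to_user() */

	return err;
}

int main(void)
{
	char user[8] = "???????", kernel[8] = "result!";

	copy_back(-22, 1, 1, user, kernel, sizeof(user));
	printf("error + ALWAYS_COPY: user buffer = %s\n", user);	/* result! */
	return 0;
}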
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 06aa62ce0ed1..3662bf5320ce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -870,7 +870,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed0fda3..9d35453e7371 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -34,7 +34,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b6eb75f4bbfc..dfc3ffd5b1f8 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -524,9 +524,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -651,16 +648,8 @@ static int moxart_probe(struct platform_device *pdev)
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7689ffec5ad1..251172890af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3928,7 +3928,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, CMD_CRC);
} else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
@@ -3938,7 +3938,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index f23a03300a81..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN sends out the buffers
+ * prioritized. The lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
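The split above encodes a real hardware difference: D_CAN has a TX FIFO, so the classic obj_num - (head - tail) holds, while plain C_CAN transmits message buffers strictly lowest-number-first, so slots below head cannot be refilled until the window drains and free space collapses to obj_num - head (zero once head has wrapped behind tail). A runnable model of the C_CAN case, using already-masked indices:

#include <stdio.h>

#define OBJ_NUM 16U

static unsigned int c_can_free(unsigned int head, unsigned int tail)
{
	if (head < tail)	/* head wrapped behind tail: nothing is safe */
		return 0;
	return OBJ_NUM - head;	/* only the area above head is reusable */
}

int main(void)
{
	printf("head=4, tail=2 -> %2u free\n", c_can_free(4, 2));	/* 12 */
	printf("head=1, tail=3 -> %2u free\n", c_can_free(1, 3));	/*  0 */
	return 0;
}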
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index dc8132862f33..d6605dbb7737 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_packets += pkts;
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 835807911be0..409d5c3d76ea 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2887,8 +2896,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66c7d08d376a..a2897549f9c4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5109,6 +5109,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5198,6 +5199,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97453d1dfafe..dd2285d4bef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1467,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 03ce85f6e6df..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -392,13 +392,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
- !is_power_of_2(vsi->tx_rings[qid]->count)) {
- netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
- pool_failure = -EINVAL;
- goto failure;
- }
-
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
@@ -534,11 +527,10 @@ exit:
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
- u16 batched, leftover, i, tail_bumps;
+ u16 leftover, i, tail_bumps;
- batched = ALIGN_DOWN(count, rx_thresh);
- tail_bumps = batched / rx_thresh;
- leftover = count & (rx_thresh - 1);
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
@@ -788,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
}
/**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
*/
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- int budget = napi_budget / tx_thresh;
- u16 next_dd = xdp_ring->next_dd;
- u16 ntc, cleared_dds = 0;
-
- do {
- struct ice_tx_desc *next_dd_desc;
- u16 desc_cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames;
- u16 i;
-
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
- cleared_dds++;
- xsk_frames = 0;
- if (likely(!xdp_ring->xdp_tx_active)) {
- xsk_frames = tx_thresh;
- goto skip;
- }
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
- ntc = xdp_ring->next_to_clean;
+ if (!xsk_frames)
+ return;
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
}
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
skip:
- xdp_ring->next_to_clean += tx_thresh;
- if (xdp_ring->next_to_clean >= desc_cnt)
- xdp_ring->next_to_clean -= desc_cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- next_dd_desc->cmd_type_offset_bsz = 0;
- next_dd = next_dd + tx_thresh;
- if (next_dd >= desc_cnt)
- next_dd = tx_thresh - 1;
- } while (--budget);
-
- xdp_ring->next_dd = next_dd;
-
- return cleared_dds * tx_thresh;
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
/**
@@ -885,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
u32 i;
@@ -905,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
}
xdp_ring->next_to_use = ntu;
-
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
}
/**
@@ -924,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
u32 nb_pkts, unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -933,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
+ int budget;
+
+ ice_clean_xdp_irq_zc(xdp_ring);
- if (budget < tx_thresh)
- budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
- struct ice_tx_desc *tx_desc;
-
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
xdp_ring->next_to_use = 0;
}
ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
&total_bytes);
+ ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
@@ -1058,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
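The rewritten cleanup above pivots on one piece of arithmetic: how many descriptors lie between next_to_clean and the last RS-marked descriptor, inclusive, on a circular ring. A stand-alone model of that computation:

#include <stdio.h>

/* completed slots from ntc up to and including last_rs on a ring of cnt */
static unsigned int completed(unsigned int last_rs, unsigned int ntc,
			      unsigned int cnt)
{
	if (last_rs >= ntc)
		return last_rs - ntc + 1;
	return last_rs + cnt - ntc + 1;	/* span wraps past the ring end */
}

int main(void)
{
	printf("%u\n", completed(7, 4, 16));	/* 4: slots 4..7 */
	printf("%u\n", completed(2, 14, 16));	/* 5: slots 14,15,0,1,2 */
	return 0;
}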
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4edbe81eb646..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,13 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
- u32 __always_unused budget,
- int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
{
return false;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 0eec05d905eb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- static struct dentry *mvpp2_root;
- struct dentry *mvpp2_dir;
+ struct dentry *mvpp2_dir, *mvpp2_root;
int ret, i;
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index ecf85e9ed824..0f9668a4079d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -319,8 +319,8 @@
#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
-#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2 BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 4aeb927c3715..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -246,8 +246,8 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
- if (IS_ERR(priv->clk_io))
- return PTR_ERR(priv->clk_io);
+ if (!priv->clk_io)
+ return -ENOMEM;
mlxbf_gige_mdio_cfg(priv);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 306026e6aa11..8f8005bc6eea 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -290,6 +290,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
if (!(vlan->portmask & BIT(port)))
continue;
+ /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+ * because this is never active in hardware at the same time as
+ * the bridge VLANs, which only matter in VLAN-aware mode.
+ */
+ if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+ continue;
+
if (vlan->untagged & BIT(port))
num_untagged++;
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ee734b69150f..d1e1aa19a68e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 592d29abcb1c..9083159b93f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3801,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3904,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+	/* Power down the SERDES, if present */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -7293,14 +7306,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -7315,8 +7320,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 74e845fa2e07..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
+ pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 12ff276b80ae..4df8c337221b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,11 +316,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- /* If we manged to get here with the PHY state machine in a state neither
- * PHY_HALTED nor PHY_READY this is an indication that something went wrong
- * and we should most likely be using MAC managed PM and we are not.
+ /* If we managed to get here with the PHY state machine in a state
+ * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+ * that something went wrong and we should most likely be using
+ * MAC managed PM, but we are not.
*/
- WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+ phydev->state != PHY_UP);
ret = phy_init_hw(phydev);
if (ret < 0)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 259b2b84b2b3..db736b944016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
rcu_assign_pointer(tfile->tun, tun);
}
- netif_carrier_on(tun->dev);
+ if (ifr->ifr_flags & IFF_NO_CARRIER)
+ netif_carrier_off(tun->dev);
+ else
+ netif_carrier_on(tun->dev);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
- return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
- (unsigned int __user*)argp);
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+ TUN_FEATURES, (unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
} else if (cmd == SIOCGSKNS) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0cb187def5bc..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1402,6 +1402,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aaa89b4cfd50..e368b0780753 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind(dev, intf);
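The drain loop replacing usb_scuttle_anchored_urbs() above exists because scuttling only unanchors and drops the URBs; the skb that usbnet parks in urb->context and the urb->sg array would both leak. usb_get_from_anchor() hands back each URB with a reference held, so the driver can free the attachments itself. A minimal sketch of the pattern:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/usb.h>

static void drain_deferred(struct usb_anchor *anchor)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(anchor))) {
		dev_kfree_skb(urb->context);	/* skb the driver stashed */
		kfree(urb->sg);			/* scatterlist, if any */
		usb_free_urb(urb);		/* drop the reference we got */
	}
}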
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 66446f1e06cf..8d5a7ae19844 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2162,14 +2162,14 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
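Both hunks above fix an inverted IEKEY bit, and nvme_pr_clear() additionally stops issuing a Register command where a Release belongs. In Reservation Release, cdw10 carries the action in bits 2:0 (0 = release, 1 = clear), "ignore existing key" in bit 3, and the reservation type in bits 15:8; IEKEY must be set exactly when no key is supplied. A small model of the corrected encoding (bit layout per the NVMe spec):

#include <stdint.h>
#include <stdio.h>

static uint32_t resv_release_cdw10(uint8_t action, uint8_t type, uint64_t key)
{
	/* IEKEY (bit 3) is set only when the caller supplied no key */
	return action | (key ? 0 : 1 << 3) | ((uint32_t)type << 8);
}

int main(void)
{
	printf("clear with key:    0x%x\n", resv_release_cdw10(1, 0, 0xbeef)); /* 0x1 */
	printf("clear without key: 0x%x\n", resv_release_cdw10(1, 0, 0));      /* 0x9 */
	return 0;
}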
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 98864b853eef..67d3335e9cc8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3470,6 +3470,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1987, 0x5019),	/* Phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333df66c..d2408725eb2c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index 00b612a0effa..f3528dd1d084 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -33,11 +33,8 @@ static struct regmap_config sparx5_reset_regmap_config = {
.reg_stride = 4,
};
-static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int sparx5_switch_reset(struct mchp_reset_context *ctx)
{
- struct mchp_reset_context *ctx =
- container_of(rcdev, struct mchp_reset_context, rcdev);
u32 val;
/* Make sure the core is PROTECTED from reset */
@@ -54,8 +51,14 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
1, 100);
}
+static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static const struct reset_control_ops sparx5_reset_ops = {
- .reset = sparx5_switch_reset,
+ .reset = sparx5_reset_noop,
};
static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
@@ -122,6 +125,11 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.of_node = dn;
ctx->props = device_get_match_data(&pdev->dev);
+ /* Issue the reset very early, our actual reset callback is a noop. */
+ err = sparx5_switch_reset(ctx);
+ if (err)
+ return err;
+
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
@@ -163,6 +171,10 @@ static int __init mchp_sparx5_reset_init(void)
return platform_driver_register(&mchp_sparx5_reset_driver);
}
+/*
+ * Because this is a global reset, keep this postcore_initcall() to issue the
+ * reset as early as possible during the kernel startup.
+ */
postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 24c55efa98e5..f2333506b0a6 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -291,7 +291,7 @@ static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
- iprst2 |= ipsrst4_bits;
+ iprst4 |= ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9c82e5dc4fcc..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -872,7 +872,8 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 1467bbd59690..e1d7b4543248 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -288,7 +288,6 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
- of_node_put(np);
return ret;
}
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index a8f3876963a0..09754cd1d57d 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev)
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
@@ -329,11 +330,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
sram_dev = &pdev->dev;
@@ -345,13 +346,6 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
-
if (variant->num_emac_clocks > 0) {
emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
&sunxi_sram_emac_clock_regmap);
@@ -360,6 +354,10 @@ static int sunxi_sram_probe(struct platform_device *pdev)
return PTR_ERR(emac_clock);
}
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
return 0;
}
@@ -409,9 +407,8 @@ static struct platform_driver sunxi_sram_driver = {
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 4af5a831bde0..4fc167b42cf0 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1162,8 +1162,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c63c1f4ff9dc..77d7f07ca075 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2413,6 +2413,7 @@ int tb_switch_configure(struct tb_switch *sw)
* additional capabilities.
*/
sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 166b5bde45cb..6c14a79279f9 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_child_by_name(node, "dwc3");
+ child = of_get_child_by_name(node, "usb");
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 23ab3b048d9b..251778d14e2d 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -76,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -118,6 +132,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 7f2624f42724..6364f0d467ea 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,8 +588,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
- if (ret == 0 && offset == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
return ret;
}
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 75a703b803a2..3e4486bfa0b7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = vp_ioread16(avail_idx_addr);
@@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
vp_iowrite16(num, avail_idx_addr);
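The two hunks above fix the same arithmetic slip: virtqueues are laid out as RX/TX pairs, so the pair index for queue qid is qid / 2; dividing by the total ring count instead maps every valid qid to pair 0 and only happens to work with a single queue pair. A trivial demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int nr_vring = 4;	/* two queue pairs */

	for (unsigned int qid = 0; qid < nr_vring; qid++)
		printf("qid %u: buggy pair %u, fixed pair %u\n",
		       qid, qid / nr_vring, qid / 2);
	/* buggy: 0,0,0,0 -- fixed: 0,0,1,1 */
	return 0;
}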
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index ed100a35e596..90913365def4 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
+ int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
+ int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
+ int act_sz = roundup_pow_of_two(num / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j = j + 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
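Both RQT paths above now honor a firmware constraint: the indirection-table size must be a power of two, so it is padded up with roundup_pow_of_two() and the entries wrap over the active virtqueues via the j % cur_num_vqs modulo. A user-space model of the padding and wrap:

#include <stdio.h>

/* user-space stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_p2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int cur_num_vqs = 6;	/* three data queue pairs */
	unsigned int act_sz = roundup_p2(cur_num_vqs / 2);	/* 3 -> 4 */

	for (unsigned int i = 0, j = 0; i < act_sz; i++, j += 2)
		printf("list[%u] = vq %u\n", i, j % cur_num_vqs);
	/* list: vq 0, vq 2, vq 4, vq 0 (padding entry wraps around) */
	return 0;
}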
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 41c0b29739f1..35dceee3ed56 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (offset > dev->config_size ||
- len > dev->config_size - offset)
+ /* Initialize the buffer in case of partial copy. */
+ memset(buf, 0, len);
+
+ if (offset > dev->config_size)
return;
+ if (len > dev->config_size - offset)
+ len = dev->config_size - offset;
+
memcpy(buf, dev->config + offset, len);
}
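The rewritten bounds handling above swaps reject-on-overflow for clamp-and-zero: the destination is pre-zeroed so a shortened copy cannot leak stale bytes to the VDUSE client, an offset past the end just yields zeros, and an over-long read is trimmed to what the config space holds. The same logic in stand-alone form:

#include <stdio.h>
#include <string.h>

static void get_config(char *buf, size_t len, size_t offset,
		       const char *cfg, size_t cfg_size)
{
	memset(buf, 0, len);	/* nothing stale survives a short copy */

	if (offset > cfg_size)
		return;
	if (len > cfg_size - offset)
		len = cfg_size - offset;

	memcpy(buf, cfg + offset, len);
}

int main(void)
{
	char cfg[4] = { 1, 2, 3, 4 }, out[8];

	get_config(out, sizeof(out), 2, cfg, sizeof(cfg));
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%d ", out[i]);	/* 3 4 0 0 0 0 0 0 */
	printf("\n");
	return 0;
}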
diff --git a/fs/coredump.c b/fs/coredump.c
index 9f4aae202109..1ab4f5b76a1e 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -832,6 +832,38 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
}
}
+static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+{
+ struct bio_vec bvec = {
+ .bv_page = page,
+ .bv_offset = 0,
+ .bv_len = PAGE_SIZE,
+ };
+ struct iov_iter iter;
+ struct file *file = cprm->file;
+ loff_t pos = file->f_pos;
+ ssize_t n;
+
+ if (cprm->to_skip) {
+ if (!__dump_skip(cprm, cprm->to_skip))
+ return 0;
+ cprm->to_skip = 0;
+ }
+ if (cprm->written + PAGE_SIZE > cprm->limit)
+ return 0;
+ if (dump_interrupted())
+ return 0;
+ iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
+ n = __kernel_write_iter(cprm->file, &iter, &pos);
+ if (n != PAGE_SIZE)
+ return 0;
+ file->f_pos = pos;
+ cprm->written += PAGE_SIZE;
+ cprm->pos += PAGE_SIZE;
+
+ return 1;
+}
+
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
if (cprm->to_skip) {
@@ -863,7 +895,6 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
- int stop;
/*
* To avoid having to allocate page tables for virtual address
@@ -874,10 +905,7 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
*/
page = get_dump_page(addr);
if (page) {
- void *kaddr = kmap_local_page(page);
-
- stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
- kunmap_local(kaddr);
+ int stop = !dump_emit_page(cprm, page);
put_page(page);
if (stop)
return 0;
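dump_emit_page() above is the heart of this hunk: instead of kmap_local_page() plus the byte-oriented dump_emit(), each dumped page is wrapped in a one-entry bvec iterator and handed to __kernel_write_iter(), so it is written out without ever being mapped into kernel address space. A minimal sketch of just the wrapping step (the helper name is illustrative):

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uio.h>

static void page_to_iter(struct page *page, struct iov_iter *iter,
			 struct bio_vec *bvec)
{
	*bvec = (struct bio_vec) {
		.bv_page   = page,
		.bv_offset = 0,
		.bv_len    = PAGE_SIZE,
	};
	/* one segment, PAGE_SIZE bytes, in the write direction */
	iov_iter_bvec(iter, WRITE, bvec, 1, PAGE_SIZE);
}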
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 71f5b67d7f28..9dad93059945 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -910,7 +910,7 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_group_info *grp, *iter;
+ struct ext4_group_info *grp = NULL, *iter;
int i;
if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
@@ -927,7 +927,6 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
continue;
}
- grp = NULL;
list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
bb_avg_fragment_size_node) {
if (sbi->s_mb_stats)
diff --git a/fs/internal.h b/fs/internal.h
index 87e96b9024ce..3e206d3e317c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -16,6 +16,7 @@ struct shrink_control;
struct fs_context;
struct user_namespace;
struct pipe_inode_info;
+struct iov_iter;
/*
* block/bdev.c
@@ -221,3 +222,5 @@ ssize_t do_getxattr(struct user_namespace *mnt_userns,
int setxattr_copy(const char __user *name, struct xattr_ctx *ctx);
int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct xattr_ctx *ctx);
+
+ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 5ae8de09b271..001f4e053c85 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2092,7 +2092,8 @@ get_ctx_vol_failed:
// TODO: Initialize security.
/* Get the extended system files' directory inode. */
vol->extend_ino = ntfs_iget(sb, FILE_Extend);
- if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+ if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
+ !S_ISDIR(vol->extend_ino->i_mode)) {
if (!IS_ERR(vol->extend_ino))
iput(vol->extend_ino);
ntfs_error(sb, "Failed to load $Extend.");
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b2fd3c20e7c2..0c034ea39954 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -28,14 +28,11 @@
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
-#include <crypto/acompress.h>
-
#include "internal.h"
/*
@@ -93,8 +90,7 @@ module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
/* Compression parameters */
-static struct crypto_acomp *tfm;
-static struct acomp_req *creq;
+static struct crypto_comp *tfm;
struct pstore_zbackend {
int (*zbufsize)(size_t size);
@@ -272,21 +268,12 @@ static const struct pstore_zbackend zbackends[] = {
static int pstore_compress(const void *in, void *out,
unsigned int inlen, unsigned int outlen)
{
- struct scatterlist src, dst;
int ret;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
return -EINVAL;
- sg_init_table(&src, 1);
- sg_set_buf(&src, in, inlen);
-
- sg_init_table(&dst, 1);
- sg_set_buf(&dst, out, outlen);
-
- acomp_request_set_params(creq, &src, &dst, inlen, outlen);
-
- ret = crypto_acomp_compress(creq);
+ ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
if (ret) {
pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
return ret;
@@ -297,7 +284,7 @@ static int pstore_compress(const void *in, void *out,
static void allocate_buf_for_compression(void)
{
- struct crypto_acomp *acomp;
+ struct crypto_comp *ctx;
int size;
char *buf;
@@ -309,7 +296,7 @@ static void allocate_buf_for_compression(void)
if (!psinfo || tfm)
return;
- if (!crypto_has_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC)) {
+ if (!crypto_has_comp(zbackend->name, 0, 0)) {
pr_err("Unknown compression: %s\n", zbackend->name);
return;
}
@@ -328,24 +315,16 @@ static void allocate_buf_for_compression(void)
return;
}
- acomp = crypto_alloc_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR_OR_NULL(acomp)) {
+ ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+ if (IS_ERR_OR_NULL(ctx)) {
kfree(buf);
pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
- PTR_ERR(acomp));
- return;
- }
-
- creq = acomp_request_alloc(acomp);
- if (!creq) {
- crypto_free_acomp(acomp);
- kfree(buf);
- pr_err("acomp_request_alloc('%s') failed\n", zbackend->name);
+ PTR_ERR(ctx));
return;
}
/* A non-NULL big_oops_buf indicates compression is available. */
- tfm = acomp;
+ tfm = ctx;
big_oops_buf_sz = size;
big_oops_buf = buf;
@@ -355,8 +334,7 @@ static void allocate_buf_for_compression(void)
static void free_buf_for_compression(void)
{
if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
- acomp_request_free(creq);
- crypto_free_acomp(tfm);
+ crypto_free_comp(tfm);
tfm = NULL;
}
kfree(big_oops_buf);
@@ -693,8 +671,6 @@ static void decompress_record(struct pstore_record *record)
int ret;
int unzipped_len;
char *unzipped, *workspace;
- struct acomp_req *dreq;
- struct scatterlist src, dst;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
return;
@@ -718,30 +694,16 @@ static void decompress_record(struct pstore_record *record)
if (!workspace)
return;
- dreq = acomp_request_alloc(tfm);
- if (!dreq) {
- kfree(workspace);
- return;
- }
-
- sg_init_table(&src, 1);
- sg_set_buf(&src, record->buf, record->size);
-
- sg_init_table(&dst, 1);
- sg_set_buf(&dst, workspace, unzipped_len);
-
- acomp_request_set_params(dreq, &src, &dst, record->size, unzipped_len);
-
/* After decompression "unzipped_len" is almost certainly smaller. */
- ret = crypto_acomp_decompress(dreq);
+ ret = crypto_comp_decompress(tfm, record->buf, record->size,
+ workspace, &unzipped_len);
if (ret) {
- pr_err("crypto_acomp_decompress failed, ret = %d!\n", ret);
+ pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
kfree(workspace);
return;
}
/* Append ECC notice to decompressed buffer. */
- unzipped_len = dreq->dlen;
memcpy(workspace + unzipped_len, record->buf + record->size,
record->ecc_notice_size);
@@ -749,7 +711,6 @@ static void decompress_record(struct pstore_record *record)
unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
GFP_KERNEL);
kfree(workspace);
- acomp_request_free(dreq);
if (!unzipped)
return;
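
The pstore hunks above revert from the asynchronous acomp interface to the synchronous crypto_comp API, which needs no scatterlists or request objects. A minimal kernel-context sketch of that API, assuming a "deflate" transform and caller-provided buffers (the function name is illustrative):

    static int example_compress(const u8 *src, unsigned int slen,
                                u8 *dst, unsigned int dlen)
    {
            struct crypto_comp *tfm;
            int ret;

            tfm = crypto_alloc_comp("deflate", 0, 0);
            if (IS_ERR_OR_NULL(tfm))
                    return -ENOMEM;

            /* dlen is in/out: capacity on entry, bytes produced on success. */
            ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
            crypto_free_comp(tfm);
            return ret ? ret : dlen;
    }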
diff --git a/fs/read_write.c b/fs/read_write.c
index 1a261dcf1778..328ce8cf9a85 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -496,14 +496,9 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
}
/* caller is responsible for file_start_write/file_end_write */
-ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
+ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos)
{
- struct kvec iov = {
- .iov_base = (void *)buf,
- .iov_len = min_t(size_t, count, MAX_RW_COUNT),
- };
struct kiocb kiocb;
- struct iov_iter iter;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
@@ -519,8 +514,7 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = pos ? *pos : 0;
- iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
- ret = file->f_op->write_iter(&kiocb, &iter);
+ ret = file->f_op->write_iter(&kiocb, from);
if (ret > 0) {
if (pos)
*pos = kiocb.ki_pos;
@@ -530,6 +524,18 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
inc_syscw(current);
return ret;
}
+
+/* caller is responsible for file_start_write/file_end_write */
+ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
+{
+ struct kvec iov = {
+ .iov_base = (void *)buf,
+ .iov_len = min_t(size_t, count, MAX_RW_COUNT),
+ };
+ struct iov_iter iter;
+ iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
+ return __kernel_write_iter(file, &iter, pos);
+}
/*
* This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
* but autofs is one of the few internal kernel users that actually
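
With the split above, __kernel_write() becomes a thin wrapper that packs a single kernel buffer into an iov_iter, while __kernel_write_iter() accepts any iterator. A hedged sketch of what the fs-internal helper enables, assuming hdr/body buffers, an open struct file and a loff_t pos (all illustrative; only code that sees fs/internal.h can call it):

    struct kvec vec[2] = {
            { .iov_base = hdr,  .iov_len = hdr_len  },
            { .iov_base = body, .iov_len = body_len },
    };
    struct iov_iter iter;
    ssize_t ret;

    /* Two segments written in one call, something the old single-buffer
     * __kernel_write() could not express. */
    iov_iter_kvec(&iter, WRITE, vec, 2, hdr_len + body_len);
    ret = __kernel_write_iter(file, &iter, &pos);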
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 69d9c83ea4b2..5b1f9a24ed59 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -175,13 +175,13 @@ xfs_dax_notify_failure(
u64 ddev_start;
u64 ddev_end;
- if (!(mp->m_sb.sb_flags & SB_BORN)) {
+ if (!(mp->m_super->s_flags & SB_BORN)) {
xfs_warn(mp, "filesystem is not ready for notify_failure()!");
return -EIO;
}
if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
- xfs_warn(mp,
+ xfs_debug(mp,
"notify_failure() not supported on realtime device!");
return -EOPNOTSUPP;
}
@@ -194,7 +194,7 @@ xfs_dax_notify_failure(
}
if (!xfs_has_rmapbt(mp)) {
- xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+ xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
return -EOPNOTSUPP;
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 698032e5ef2d..20765d1c5f80 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1136,8 +1136,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6257867fbf95..567f12323f55 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1788,42 +1788,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_unlock();
}
-/**
- * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
- * @p: pointer to object from which memcg should be extracted. It can be NULL.
- *
- * Retrieves the memory group into which the memory of the pointed kernel
- * object is accounted. If memcg is found, its reference is taken.
- * If a passed kernel object is uncharged, or if proper memcg cannot be found,
- * as well as if mem_cgroup is disabled, NULL is returned.
- *
- * Return: valid memcg pointer with taken reference or NULL.
- */
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- struct mem_cgroup *memcg;
-
- rcu_read_lock();
- do {
- memcg = mem_cgroup_from_obj(p);
- } while (memcg && !css_tryget(&memcg->css));
- rcu_read_unlock();
- return memcg;
-}
-
-/**
- * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
- * @memcg: pointer to a valid memory cgroup or NULL.
- *
- * If passed argument is not NULL, returns it without any additional checks
- * and changes. Otherwise, root_mem_cgroup is returned.
- *
- * NOTE: root_mem_cgroup can be NULL during early boot.
- */
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return memcg ? memcg : root_mem_cgroup;
-}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1880,15 +1844,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return NULL;
-}
#endif /* CONFIG_MEMCG_KMEM */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 19010491a603..c3b4cc84877b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -139,6 +139,11 @@ struct dev_pagemap {
};
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 2ec07de1d73b..b6d7b868f290 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
#define IFF_TAP 0x0002
#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER 0x0040
#define IFF_NO_PI 0x1000
/* This flag has no real effect */
#define IFF_ONE_QUEUE 0x2000
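
The new flag lets userspace create a tun/tap device whose carrier starts off. A hedged userspace sketch (open_tap_no_carrier is an illustrative name; requires a kernel exposing IFF_NO_CARRIER):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    static int open_tap_no_carrier(const char *name)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
            /* Carrier starts off; it can be raised later via TUNSETCARRIER. */
            ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NO_CARRIER;
            if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }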
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2965b354efc8..242d896c00f3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3357,6 +3357,10 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
goto err;
}
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
+ && !(ctx->flags & IORING_SETUP_R_DISABLED))
+ ctx->submitter_task = get_task_struct(current);
+
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
@@ -3548,6 +3552,9 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
if (!(ctx->flags & IORING_SETUP_R_DISABLED))
return -EBADFD;
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
+ ctx->submitter_task = get_task_struct(current);
+
if (ctx->restrictions.registered)
ctx->restricted = 1;
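
The two io_uring hunks move the submitter_task capture so that, for a ring created with IORING_SETUP_R_DISABLED, the task that later enables the ring (not the creator) is recorded as the single issuer. A hedged liburing sketch of that flow (assumes a liburing providing io_uring_enable_rings()):

    #include <liburing.h>

    /* Setup thread: create the ring disabled. */
    static int create_ring(struct io_uring *ring)
    {
            return io_uring_queue_init(64, ring,
                            IORING_SETUP_SINGLE_ISSUER |
                            IORING_SETUP_R_DISABLED);
    }

    /* Submitter thread: enabling from here makes *this* task the issuer. */
    static int enable_ring(struct io_uring *ring)
    {
            return io_uring_enable_rings(ring);
    }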
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d5bad0bea6e4..0d9f49c575e0 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -857,7 +857,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->buf_index || sqe->off || sqe->addr)
return -EINVAL;
flags = READ_ONCE(sqe->len);
- if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
+ if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
return -EINVAL;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2621fd24ad26..ff4bffc502c6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6893,9 +6893,16 @@ static void perf_output_read_group(struct perf_output_handle *handle,
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
+ unsigned long flags;
u64 values[6];
int n = 0;
+ /*
+ * Disabling interrupts avoids all counter scheduling
+ * (context switches, timer based rotation and IPIs).
+ */
+ local_irq_save(flags);
+
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -6931,6 +6938,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}
+
+ local_irq_restore(flags);
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
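
The perf fix brackets the whole group read with local_irq_save()/local_irq_restore() so all the values copied out describe one scheduling state. The same snapshot pattern in isolation (sketch; ctx and its fields are illustrative):

    unsigned long flags;
    u64 snap_enabled, snap_running;

    local_irq_save(flags);
    /* No timer tick, IPI or context switch can reschedule counters
     * on this CPU between the two reads below. */
    snap_enabled = READ_ONCE(ctx->time_enabled);
    snap_running = READ_ONCE(ctx->time_running);
    local_irq_restore(flags);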
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index cfdf63132d5a..4e51466c4e74 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -884,6 +884,7 @@ static int dbgfs_rm_context(char *name)
struct dentry *root, *dir, **new_dirs;
struct damon_ctx **new_ctxs;
int i, j;
+ int ret = 0;
if (damon_nr_running_ctxs())
return -EBUSY;
@@ -898,14 +899,16 @@ static int dbgfs_rm_context(char *name)
new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
GFP_KERNEL);
- if (!new_dirs)
- return -ENOMEM;
+ if (!new_dirs) {
+ ret = -ENOMEM;
+ goto out_dput;
+ }
new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
GFP_KERNEL);
if (!new_ctxs) {
- kfree(new_dirs);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_new_dirs;
}
for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
@@ -925,7 +928,13 @@ static int dbgfs_rm_context(char *name)
dbgfs_ctxs = new_ctxs;
dbgfs_nr_ctxs--;
- return 0;
+ goto out_dput;
+
+out_new_dirs:
+ kfree(new_dirs);
+out_dput:
+ dput(dir);
+ return ret;
}
static ssize_t dbgfs_rm_context_write(struct file *file,
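
The dbgfs fix above converts early returns into the usual kernel unwind idiom, which is also what makes it easy to add the previously missing dput(). The idiom in isolation (sketch; names illustrative):

    static int example(void)
    {
            void *a, *b;
            int ret = -ENOMEM;

            a = kmalloc(64, GFP_KERNEL);
            if (!a)
                    goto out;
            b = kmalloc(64, GFP_KERNEL);
            if (!b)
                    goto out_free_a;

            /* ... use a and b ... */
            ret = 0;

            kfree(b);
    out_free_a:
            kfree(a);
    out:
            return ret;
    }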
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 7488e27c87c3..bdef9682d0a0 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -2182,12 +2182,12 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
if (!t)
return -ENOMEM;
+ damon_add_target(ctx, t);
if (damon_target_has_pid(ctx)) {
t->pid = find_get_pid(sys_target->pid);
if (!t->pid)
goto destroy_targets_out;
}
- damon_add_target(ctx, t);
err = damon_sysfs_set_regions(t, sys_target->regions);
if (err)
goto destroy_targets_out;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 1a97610308cb..279e55b4ed87 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -125,6 +125,9 @@ void frontswap_init(unsigned type, unsigned long *map)
* p->frontswap set to something valid to work properly.
*/
frontswap_map_set(sis, map);
+
+ if (!frontswap_enabled())
+ return;
frontswap_ops->init(type);
}
diff --git a/mm/gup.c b/mm/gup.c
index 5abdaf487460..00926abb4426 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2345,8 +2345,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+/*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+ * operations.
+ *
+ * To pin the page, fast-gup needs to do the following, in order:
+ * (1) pin the page (by prefetching pte), then (2) check the pte is unchanged.
+ *
+ * For the rest of pgtable operations where pgtable updates can be racy
+ * with fast-gup, we need to do (1) clear pte, then (2) check whether page
+ * is pinned.
+ *
+ * Above will work for all pte-level operations, including THP split.
+ *
+ * For THP collapse, it's a bit more complicated because fast-gup may be
+ * walking a pgtable page that is being freed (pte is still valid but pmd
+ * can be cleared already). To avoid race in such condition, we need to
+ * also check pmd here to make sure pmd doesn't change (corresponds to
+ * pmdp_collapse_flush() in the THP collapse code path).
+ */
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
@@ -2392,7 +2412,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
goto pte_unmap;
}
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
+ unlikely(pte_val(pte) != pte_val(*ptep))) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
}
@@ -2439,8 +2460,9 @@ pte_unmap:
* get_user_pages_fast_only implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- unsigned int flags, struct page **pages, int *nr)
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
return 0;
}
@@ -2764,7 +2786,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, flags, pages, nr))
return 0;
- } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+ } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
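
Condensed, the pin-then-recheck ordering the new comment describes looks like this (schematic extract following the hunk above, not the full function):

    pte = ptep_get_lockless(ptep);
    folio = try_grab_folio(page, 1, flags);          /* (1) pin          */
    if (!folio)
            goto pte_unmap;
    if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||  /* (2) re-check pmd */
        unlikely(pte_val(pte) != pte_val(*ptep))) {  /*     ... and pte  */
            gup_put_folio(folio, 1, flags);          /* raced: back off  */
            goto pte_unmap;
    }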
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e9414ee57c5b..f42bb51e023a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2894,11 +2894,9 @@ static void split_huge_pages_all(void)
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
int nr_pages;
- if (!pfn_valid(pfn))
- continue;
- page = pfn_to_page(pfn);
- if (!get_page_unless_zero(page))
+ page = pfn_to_online_page(pfn);
+ if (!page || !get_page_unless_zero(page))
continue;
if (zone != page_zone(page))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e070b8593b37..0bdfc7e1c933 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3420,6 +3420,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
{
int i, nid = page_to_nid(page);
struct hstate *target_hstate;
+ struct page *subpage;
int rc = 0;
target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
@@ -3453,15 +3454,16 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
mutex_lock(&target_hstate->resize_lock);
for (i = 0; i < pages_per_huge_page(h);
i += pages_per_huge_page(target_hstate)) {
+ subpage = nth_page(page, i);
if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_page_for_demote(page + i,
+ prep_compound_gigantic_page_for_demote(subpage,
target_hstate->order);
else
- prep_compound_page(page + i, target_hstate->order);
- set_page_private(page + i, 0);
- set_page_refcounted(page + i);
- prep_new_huge_page(target_hstate, page + i, nid);
- put_page(page + i);
+ prep_compound_page(subpage, target_hstate->order);
+ set_page_private(subpage, 0);
+ set_page_refcounted(subpage);
+ prep_new_huge_page(target_hstate, subpage, nid);
+ put_page(subpage);
}
mutex_unlock(&target_hstate->resize_lock);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 01f71786d530..70b7ac66411c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1083,10 +1083,12 @@ static void collapse_huge_page(struct mm_struct *mm,
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
/*
- * After this gup_fast can't run anymore. This also removes
- * any huge TLB entry from the CPU so we won't allow
- * huge and small TLB entries for the same virtual address
- * to avoid the risk of CPU bugs in that area.
+ * This removes any huge TLB entry from the CPU so we won't allow
+ * huge and small TLB entries for the same virtual address to
+ * avoid the risk of CPU bugs in that area.
+ *
+ * Parallel fast GUP is fine since fast GUP will back off when
+ * it detects PMD is changed.
*/
_pmd = pmdp_collapse_flush(vma, address, pmd);
spin_unlock(pmd_ptl);
diff --git a/mm/madvise.c b/mm/madvise.c
index 5f0f0948a50e..9ff51650f4f0 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -451,8 +451,11 @@ regular_page:
continue;
}
- /* Do not interfere with other mappings of this page */
- if (page_mapcount(page) != 1)
+ /*
+ * Do not interfere with other mappings of this page and
+ * non-LRU page.
+ */
+ if (!PageLRU(page) || page_mapcount(page) != 1)
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 14439806b5ef..e7ac570dda75 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -345,13 +345,17 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
* not much we can do. We just print a message and ignore otherwise.
*/
+#define FSDAX_INVALID_PGOFF ULONG_MAX
+
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
*
- * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
- * In other cases, such as anonymous and file-backend page, the address to be
- * killed can be caculated by @p itself.
+ * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
+ * filesystem with a memory failure handler has claimed the
+ * memory_failure event. In all other cases, page->index and
+ * page->mapping are sufficient for mapping the page back to its
+ * corresponding user virtual address.
*/
static void add_to_kill(struct task_struct *tsk, struct page *p,
pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
@@ -367,11 +371,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
tk->addr = page_address_in_vma(p, vma);
if (is_zone_device_page(p)) {
- /*
- * Since page->mapping is not used for fsdax, we need
- * calculate the address based on the vma.
- */
- if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+ if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
} else
@@ -523,7 +523,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, 0, vma, to_kill);
+ add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+ to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -559,7 +560,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* to be informed of all such data corruptions.
*/
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, 0, vma, to_kill);
+ add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+ to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -743,6 +745,9 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
};
priv.tk.tsk = p;
+ if (!p->mm)
+ return -EFAULT;
+
mmap_read_lock(p->mm);
ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
(void *)&priv);
@@ -1928,7 +1933,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
* Call driver's implementation to handle the memory failure, otherwise
* fall back to generic handler.
*/
- if (pgmap->ops->memory_failure) {
+ if (pgmap_has_memory_failure(pgmap)) {
rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
/*
* Fall back to generic handler too if operation is not
diff --git a/mm/memory.c b/mm/memory.c
index 4ba73f5aa8bb..a78814413ac0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4386,14 +4386,20 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
- ret = 0;
+
/* Re-check under ptl */
- if (likely(!vmf_pte_changed(vmf)))
+ if (likely(!vmf_pte_changed(vmf))) {
do_set_pte(vmf, page, vmf->address);
- else
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, vmf->address, vmf->pte);
+
+ ret = 0;
+ } else {
+ update_mmu_tlb(vma, vmf->address, vmf->pte);
ret = VM_FAULT_NOPAGE;
+ }
- update_mmu_tlb(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
}
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 27fb37d65476..dbf6c7a7a7c9 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
+#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
@@ -193,10 +194,10 @@ again:
bool anon_exclusive;
pte_t swp_pte;
+ flush_cache_page(vma, addr, pte_pfn(*ptep));
anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
if (anon_exclusive) {
- flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ pte = ptep_clear_flush(vma, addr, ptep);
if (page_try_share_anon_rmap(page)) {
set_pte_at(mm, addr, ptep, pte);
@@ -206,11 +207,15 @@ again:
goto next;
}
} else {
- ptep_get_and_clear(mm, addr, ptep);
+ pte = ptep_get_and_clear(mm, addr, ptep);
}
migrate->cpages++;
+ /* Set the dirty flag on the folio now the pte is gone. */
+ if (pte_dirty(pte))
+ folio_mark_dirty(page_folio(page));
+
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
entry = make_writable_migration_entry(
@@ -254,13 +259,14 @@ next:
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = mpfn;
}
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep - 1, ptl);
/* Only flush the TLB if we actually modified any entries */
if (unmapped)
flush_tlb_range(walk->vma, start, end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(ptep - 1, ptl);
+
return 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d47406e..d04211f0ef0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4708,6 +4708,30 @@ void fs_reclaim_release(gfp_t gfp_mask)
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
+/*
+ * Zonelists may change due to hotplug during allocation. Detect when zonelists
+ * have been rebuilt so allocation retries. Reader side does not lock and
+ * retries the allocation if zonelist changes. Writer side is protected by the
+ * embedded spin_lock.
+ */
+static DEFINE_SEQLOCK(zonelist_update_seq);
+
+static unsigned int zonelist_iter_begin(void)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqbegin(&zonelist_update_seq);
+
+ return 0;
+}
+
+static unsigned int check_retry_zonelist(unsigned int seq)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqretry(&zonelist_update_seq, seq);
+
+ return seq;
+}
+
/* Perform direct synchronous page reclaim */
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -5001,6 +5025,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int compaction_retries;
int no_progress_loops;
unsigned int cpuset_mems_cookie;
+ unsigned int zonelist_iter_cookie;
int reserve_flags;
/*
@@ -5011,11 +5036,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
gfp_mask &= ~__GFP_ATOMIC;
-retry_cpuset:
+restart:
compaction_retries = 0;
no_progress_loops = 0;
compact_priority = DEF_COMPACT_PRIORITY;
cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist_iter_cookie = zonelist_iter_begin();
/*
* The fast path uses conservative alloc_flags to succeed only until
@@ -5187,9 +5213,13 @@ retry:
goto retry;
- /* Deal with possible cpuset update races before we start OOM killing */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * an unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
@@ -5209,9 +5239,13 @@ retry:
}
nopage:
- /* Deal with possible cpuset update races before we fail */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * a unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/*
* Make sure that __GFP_NOFAIL request doesn't leak out and make sure
@@ -5706,6 +5740,18 @@ refill:
/* reset page count bias and offset to start of new frag */
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
+ if (unlikely(offset < 0)) {
+ /*
+ * The caller is trying to allocate a fragment
+ * with fragsz > PAGE_SIZE but the cache isn't big
+ * enough to satisfy the request, this may
+ * happen in low memory conditions.
+ * We don't release the cache page because
+ * it could make memory pressure worse
+ * so we simply return NULL here.
+ */
+ return NULL;
+ }
}
nc->pagecnt_bias--;
@@ -6514,9 +6560,8 @@ static void __build_all_zonelists(void *data)
int nid;
int __maybe_unused cpu;
pg_data_t *self = data;
- static DEFINE_SPINLOCK(lock);
- spin_lock(&lock);
+ write_seqlock(&zonelist_update_seq);
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -6553,7 +6598,7 @@ static void __build_all_zonelists(void *data)
#endif
}
- spin_unlock(&lock);
+ write_sequnlock(&zonelist_update_seq);
}
static noinline void __init
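
The spinlock-to-seqlock conversion above is what gives the lockless reader a way to detect a concurrent zonelist rebuild. The seqlock pattern in isolation (sketch):

    static DEFINE_SEQLOCK(cfg_seq);
    static struct { int a, b; } cfg;

    /* Writer: serialized by the embedded spinlock, bumps the sequence. */
    static void cfg_update(int a, int b)
    {
            write_seqlock(&cfg_seq);
            cfg.a = a;
            cfg.b = b;
            write_sequnlock(&cfg_seq);
    }

    /* Reader: lockless; retries if a writer ran concurrently. */
    static void cfg_read(int *a, int *b)
    {
            unsigned int seq;

            do {
                    seq = read_seqbegin(&cfg_seq);
                    *a = cfg.a;
                    *b = cfg.b;
            } while (read_seqretry(&cfg_seq, seq));
    }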
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 9d73dc38e3d7..eb3a68ca92ad 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -288,6 +288,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
+ * @migratetype: migrate type to set in error recovery.
*
* Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
@@ -302,9 +303,9 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* the in-use page then splitting the free page.
*/
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- gfp_t gfp_flags, bool isolate_before, bool skip_isolation)
+ gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
+ int migratetype)
{
- unsigned char saved_mt;
unsigned long start_pfn;
unsigned long isolate_pageblock;
unsigned long pfn;
@@ -328,13 +329,13 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
zone->zone_start_pfn);
- saved_mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
+ if (skip_isolation) {
+ int mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
- if (skip_isolation)
- VM_BUG_ON(!is_migrate_isolate(saved_mt));
- else {
- ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
- isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+ VM_BUG_ON(!is_migrate_isolate(mt));
+ } else {
+ ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
+ flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
if (ret)
return ret;
@@ -475,7 +476,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
failed:
/* restore the original migratetype */
if (!skip_isolation)
- unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
return -EBUSY;
}
@@ -537,7 +538,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, skip_isolation);
+ ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
+ skip_isolation, migratetype);
if (ret)
return ret;
@@ -545,7 +547,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, skip_isolation);
+ ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
+ skip_isolation, migratetype);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
return ret;
diff --git a/mm/secretmem.c b/mm/secretmem.c
index e3e9590c6fb3..3f7154099795 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -285,7 +285,7 @@ static int secretmem_init(void)
secretmem_mnt = kern_mount(&secretmem_fs);
if (IS_ERR(secretmem_mnt))
- ret = PTR_ERR(secretmem_mnt);
+ return PTR_ERR(secretmem_mnt);
/* prevent secretmem mappings from ever getting PROT_EXEC */
secretmem_mnt->mnt_flags |= MNT_NOEXEC;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e166051566f4..41afa6d45b23 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -151,7 +151,7 @@ void __delete_from_swap_cache(struct folio *folio,
for (i = 0; i < nr; i++) {
void *entry = xas_store(&xas, shadow);
- VM_BUG_ON_FOLIO(entry != folio, folio);
+ VM_BUG_ON_PAGE(entry != folio, entry);
set_page_private(folio_page(folio, i), 0);
xas_next(&xas);
}
diff --git a/mm/util.c b/mm/util.c
index c9439c66d8cf..346e40177bc6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -619,6 +619,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if (ret || size <= PAGE_SIZE)
return ret;
+ /* non-sleeping allocations are not supported by vmalloc */
+ if (!gfpflags_allow_blocking(flags))
+ return NULL;
+
/* Don't even allow crazy sizes */
if (unlikely(size > INT_MAX)) {
WARN_ON_ONCE(!(flags & __GFP_NOWARN));
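
kvmalloc_node() now uses gfpflags_allow_blocking() to refuse the vmalloc fallback for non-sleeping allocations. Callers can apply the same test themselves (sketch):

    static void *alloc_big(size_t size, gfp_t gfp)
    {
            /* vmalloc may sleep: only kvmalloc() when blocking is allowed. */
            if (gfpflags_allow_blocking(gfp))
                    return kvmalloc(size, gfp);
            return kmalloc(size, gfp);
    }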
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2b1431352dc..382dbe97329f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2550,8 +2550,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
}
if (unlikely(buffer_heads_over_limit)) {
- if (folio_get_private(folio) && folio_trylock(folio)) {
- if (folio_get_private(folio))
+ if (folio_test_private(folio) && folio_trylock(folio)) {
+ if (folio_test_private(folio))
filemap_release_folio(folio, 0);
folio_unlock(folio);
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6b9f19122ec1..0ec2f5906a27 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -18,7 +18,6 @@
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
-#include <linux/sched/mm.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
@@ -1144,13 +1143,7 @@ static int __register_pernet_operations(struct list_head *list,
* setup_net() and cleanup_net() are not possible.
*/
for_each_net(net) {
- struct mem_cgroup *old, *memcg;
-
- memcg = mem_cgroup_or_root(get_mem_cgroup_from_obj(net));
- old = set_active_memcg(memcg);
error = ops_init(ops, net);
- set_active_memcg(old);
- mem_cgroup_put(memcg);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5265d2b6db12..fc764984d687 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4040,7 +4040,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
(!elems->he_cap || !elems->he_operation)) {
- mutex_unlock(&sdata->local->sta_mtx);
sdata_info(sdata,
"HE AP is missing HE capability/operation\n");
ret = false;
@@ -5589,12 +5588,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
- if (WARN_ON(!sta))
+ if (WARN_ON(!sta)) {
+ mutex_unlock(&local->sta_mtx);
goto free;
+ }
link_sta = rcu_dereference_protected(sta->link[link->link_id],
lockdep_is_held(&local->sta_mtx));
- if (WARN_ON(!link_sta))
+ if (WARN_ON(!link_sta)) {
+ mutex_unlock(&local->sta_mtx);
goto free;
+ }
changed |= ieee80211_recalc_twt_req(link, link_sta, elems);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5f27e6746762..788a82f9c74d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -10,6 +10,7 @@
#include <linux/random.h>
#include <linux/moduleparam.h>
#include <linux/ieee80211.h>
+#include <linux/minmax.h>
#include <net/mac80211.h>
#include "rate.h"
#include "sta_info.h"
@@ -1550,6 +1551,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct ieee80211_sta_rates *rates;
int i = 0;
+ int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);
rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
if (!rates)
@@ -1559,10 +1561,10 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
/* Fill up remaining, keep one entry for max_probe_rate */
- for (; i < (mp->hw->max_rates - 1); i++)
+ for (; i < (max_rates - 1); i++)
minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
- if (i < mp->hw->max_rates)
+ if (i < max_rates)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
if (i < IEEE80211_TX_RATE_TABLE_SIZE)
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8e77fd2e9fdf..3f9ddd7f04b6 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -729,7 +729,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
if (!sdata) {
skb->dev = NULL;
- } else {
+ } else if (!dropped) {
unsigned int hdr_size =
ieee80211_hdrlen(hdr->frame_control);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bf7fe6cd9dfc..13249e97a069 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5878,6 +5878,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ goto start_xmit;
+
/* update QoS header to prioritize control port frames if possible,
* prioritization also happens for control port frames sent over
* AF_PACKET
@@ -5905,6 +5908,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
}
rcu_read_unlock();
+start_xmit:
/* mutex lock is only needed for incrementing the cookie counter */
mutex_lock(&local->mtx);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 53826c663723..efcefb2dd882 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -301,14 +301,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
local_bh_disable();
spin_lock(&fq->lock);
+ sdata->vif.txqs_stopped[ac] = false;
+
if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
goto out;
if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->bss->ps;
- sdata->vif.txqs_stopped[ac] = false;
-
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 969b33a9dd64..f8897a70c11d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2662,7 +2662,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
dfrag_clear(sk, dfrag);
}
-static void mptcp_cancel_work(struct sock *sk)
+void mptcp_cancel_work(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -2802,13 +2802,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
sock_put(sk);
}
-static void mptcp_close(struct sock *sk, long timeout)
+bool __mptcp_close(struct sock *sk, long timeout)
{
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
bool do_cancel_work = false;
- lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
@@ -2850,6 +2849,17 @@ cleanup:
} else {
mptcp_reset_timeout(msk, 0);
}
+
+ return do_cancel_work;
+}
+
+static void mptcp_close(struct sock *sk, long timeout)
+{
+ bool do_cancel_work;
+
+ lock_sock(sk);
+
+ do_cancel_work = __mptcp_close(sk, timeout);
release_sock(sk);
if (do_cancel_work)
mptcp_cancel_work(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 132d50833df1..8f372b8f059c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -612,6 +612,8 @@ void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+bool __mptcp_close(struct sock *sk, long timeout);
+void mptcp_cancel_work(struct sock *sk);
bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
const struct mptcp_addr_info *b, bool use_port);
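
After this refactor every caller of __mptcp_close() follows the shape mptcp_close() keeps (sketch mirroring the hunks above): lock, close, unlock, and only then cancel the work item, which may itself take the socket lock:

    bool do_cancel_work;

    lock_sock(sk);
    do_cancel_work = __mptcp_close(sk, timeout);
    release_sock(sk);
    if (do_cancel_work)
            mptcp_cancel_work(sk);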
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index c7d49fb6e7bd..07dd23d0fe04 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -602,30 +602,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
-static void mptcp_sock_destruct(struct sock *sk)
-{
- /* if new mptcp socket isn't accepted, it is free'd
- * from the tcp listener sockets request queue, linked
- * from req->sk. The tcp socket is released.
- * This calls the ULP release function which will
- * also remove the mptcp socket, via
- * sock_put(ctx->conn).
- *
- * Problem is that the mptcp socket will be in
- * ESTABLISHED state and will not have the SOCK_DEAD flag.
- * Both result in warnings from inet_sock_destruct.
- */
- if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- sk->sk_state = TCP_CLOSE;
- WARN_ON_ONCE(sk->sk_socket);
- sock_orphan(sk);
- }
-
- /* We don't need to clear msk->subflow, as it's still NULL at this point */
- mptcp_destroy_common(mptcp_sk(sk), 0);
- inet_sock_destruct(sk);
-}
-
static void mptcp_force_close(struct sock *sk)
{
/* the msk is not yet exposed to user-space */
@@ -768,7 +744,6 @@ create_child:
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
- new_msk->sk_destruct = mptcp_sock_destruct;
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
@@ -1763,13 +1738,19 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk;
- bool slow;
+ bool slow, do_cancel_work;
+ sock_hold(sk);
slow = lock_sock_fast_nested(sk);
next = msk->dl_next;
msk->first = NULL;
msk->dl_next = NULL;
+
+ do_cancel_work = __mptcp_close(sk, 0);
unlock_sock_fast(sk, slow);
+ if (do_cancel_work)
+ mptcp_cancel_work(sk);
+ sock_put(sk);
}
/* we are still under the listener msk socket lock */
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index d55afb8d14be..5950974ae8f6 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1394,7 +1394,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
err = tcf_ct_flow_table_get(net, params);
if (err)
- goto cleanup;
+ goto cleanup_params;
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -1409,6 +1409,9 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
return res;
+cleanup_params:
+ if (params->tmpl)
+ nf_ct_put(params->tmpl);
cleanup:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2c127951764a..775836f6785a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1361,7 +1361,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
25599, /* 4.166666... */
17067, /* 2.777777... */
12801, /* 2.083333... */
- 11769, /* 1.851851... */
+ 11377, /* 1.851725... */
10239, /* 1.666666... */
8532, /* 1.388888... */
7680, /* 1.250000... */
@@ -1444,7 +1444,7 @@ static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate)
25599, /* 4.166666... */
17067, /* 2.777777... */
12801, /* 2.083333... */
- 11769, /* 1.851851... */
+ 11377, /* 1.851725... */
10239, /* 1.666666... */
8532, /* 1.388888... */
7680, /* 1.250000... */
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index d84ffdf47210..5a478649f338 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -450,6 +450,16 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51cb,
},
+ /* RaptorLake-M */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51ce,
+ },
+ /* RaptorLake-PX */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cf,
+ },
#endif
};
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 15596452ca37..4f19fd9b65d1 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -901,7 +901,10 @@ static void nau8824_jdet_work(struct work_struct *work)
NAU8824_IRQ_KEY_RELEASE_DIS |
NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
- nau8824_sema_release(nau8824);
+ if (nau8824->resume_lock) {
+ nau8824_sema_release(nau8824);
+ nau8824->resume_lock = false;
+ }
}
static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
@@ -966,7 +969,10 @@ static irqreturn_t nau8824_interrupt(int irq, void *data)
/* release semaphore held after resume,
* and cancel jack detection
*/
- nau8824_sema_release(nau8824);
+ if (nau8824->resume_lock) {
+ nau8824_sema_release(nau8824);
+ nau8824->resume_lock = false;
+ }
cancel_work_sync(&nau8824->jdet_work);
} else if (active_irq & NAU8824_KEY_SHORT_PRESS_IRQ) {
int key_status, button_pressed;
@@ -1524,6 +1530,7 @@ static int __maybe_unused nau8824_suspend(struct snd_soc_component *component)
static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
{
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
+ int ret;
regcache_cache_only(nau8824->regmap, false);
regcache_sync(nau8824->regmap);
@@ -1531,7 +1538,10 @@ static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
/* Hold semaphore to postpone playback happening
* until jack detection done.
*/
- nau8824_sema_acquire(nau8824, 0);
+ nau8824->resume_lock = true;
+ ret = nau8824_sema_acquire(nau8824, 0);
+ if (ret)
+ nau8824->resume_lock = false;
enable_irq(nau8824->irq);
}
@@ -1940,6 +1950,7 @@ static int nau8824_i2c_probe(struct i2c_client *i2c)
nau8824->regmap = devm_regmap_init_i2c(i2c, &nau8824_regmap_config);
if (IS_ERR(nau8824->regmap))
return PTR_ERR(nau8824->regmap);
+ nau8824->resume_lock = false;
nau8824->dev = dev;
nau8824->irq = i2c->irq;
sema_init(&nau8824->jd_sem, 1);
diff --git a/sound/soc/codecs/nau8824.h b/sound/soc/codecs/nau8824.h
index de4bae8281d0..5fcfc43dfc85 100644
--- a/sound/soc/codecs/nau8824.h
+++ b/sound/soc/codecs/nau8824.h
@@ -436,6 +436,7 @@ struct nau8824 {
struct semaphore jd_sem;
int fs;
int irq;
+ int resume_lock;
int micbias_voltage;
int vref_impedance;
int jkdet_polarity;
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 5a844329800f..0f8e6dd214b0 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2494,7 +2494,7 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
/* Select JD-source */
snd_soc_component_update_bits(component, RT5640_JD_CTRL,
- RT5640_JD_MASK, rt5640->jd_src);
+ RT5640_JD_MASK, rt5640->jd_src << RT5640_JD_SFT);
/* Selecting GPIO01 as an interrupt */
snd_soc_component_update_bits(component, RT5640_GPIO_CTRL1,
@@ -2504,12 +2504,8 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
snd_soc_component_update_bits(component, RT5640_GPIO_CTRL3,
RT5640_GP1_PF_MASK, RT5640_GP1_PF_OUT);
- /* Enabling jd2 in general control 1 */
snd_soc_component_write(component, RT5640_DUMMY1, 0x3f41);
- /* Enabling jd2 in general control 2 */
- snd_soc_component_write(component, RT5640_DUMMY2, 0x4001);
-
rt5640_set_ovcd_params(component);
/*
@@ -2518,12 +2514,25 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
* pin 0/1 instead of it being stuck to 1. So we invert the JD polarity
* on systems where the hardware does not already do this.
*/
- if (rt5640->jd_inverted)
- snd_soc_component_write(component, RT5640_IRQ_CTRL1,
- RT5640_IRQ_JD_NOR);
- else
- snd_soc_component_write(component, RT5640_IRQ_CTRL1,
- RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+ if (rt5640->jd_inverted) {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+ snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+ RT5640_IRQ_JD_NOR);
+ else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK | RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR | RT5640_JD2_EN);
+ } else {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+ snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+ RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+ else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK | RT5640_JD2_P_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR | RT5640_JD2_P_INV |
+ RT5640_JD2_EN);
+ }
rt5640->jack = jack;
if (rt5640->jack->status & SND_JACK_MICROPHONE) {
@@ -2725,10 +2734,8 @@ static int rt5640_probe(struct snd_soc_component *component)
if (device_property_read_u32(component->dev,
"realtek,jack-detect-source", &val) == 0) {
- if (val <= RT5640_JD_SRC_GPIO4)
- rt5640->jd_src = val << RT5640_JD_SFT;
- else if (val == RT5640_JD_SRC_HDA_HEADER)
- rt5640->jd_src = RT5640_JD_SRC_HDA_HEADER;
+ if (val <= RT5640_JD_SRC_HDA_HEADER)
+ rt5640->jd_src = val;
else
dev_warn(component->dev, "Warning: Invalid jack-detect-source value: %d, leaving jack-detect disabled\n",
val);
@@ -2809,12 +2816,31 @@ static int rt5640_resume(struct snd_soc_component *component)
regcache_sync(rt5640->regmap);
if (rt5640->jack) {
- if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
+ if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
snd_soc_component_update_bits(component,
RT5640_DUMMY2, 0x1100, 0x1100);
- else
- snd_soc_component_write(component, RT5640_DUMMY2,
- 0x4001);
+ } else {
+ if (rt5640->jd_inverted) {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(
+ component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR |
+ RT5640_JD2_EN);
+
+ } else {
+ if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+ snd_soc_component_update_bits(
+ component, RT5640_DUMMY2,
+ RT5640_IRQ_JD2_MASK |
+ RT5640_JD2_P_MASK |
+ RT5640_JD2_MASK,
+ RT5640_IRQ_JD2_NOR |
+ RT5640_JD2_P_INV |
+ RT5640_JD2_EN);
+ }
+ }
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
}
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 505c93514051..f58b88e3325b 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -1984,6 +1984,20 @@
#define RT5640_M_MONO_ADC_R_SFT 12
#define RT5640_MCLK_DET (0x1 << 11)
+/* General Control 1 (0xfb) */
+#define RT5640_IRQ_JD2_MASK (0x1 << 12)
+#define RT5640_IRQ_JD2_SFT 12
+#define RT5640_IRQ_JD2_BP (0x0 << 12)
+#define RT5640_IRQ_JD2_NOR (0x1 << 12)
+#define RT5640_JD2_P_MASK (0x1 << 10)
+#define RT5640_JD2_P_SFT 10
+#define RT5640_JD2_P_NOR (0x0 << 10)
+#define RT5640_JD2_P_INV (0x1 << 10)
+#define RT5640_JD2_MASK (0x1 << 8)
+#define RT5640_JD2_SFT 8
+#define RT5640_JD2_DIS (0x0 << 8)
+#define RT5640_JD2_EN (0x1 << 8)
+
/* Codec Private Register definition */
/* MIC Over current threshold scale factor (0x15) */
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index bb653b664146..b6765235a4b3 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -495,6 +495,8 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
},
};
+static const struct regmap_config tas2770_i2c_regmap;
+
static int tas2770_codec_probe(struct snd_soc_component *component)
{
struct tas2770_priv *tas2770 =
@@ -508,6 +510,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
}
tas2770_reset(tas2770);
+ regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap);
return 0;
}
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 14be29530fb5..3f128ced4180 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -698,6 +698,10 @@ static int imx_card_parse_of(struct imx_card_data *data)
of_node_put(cpu);
of_node_put(codec);
of_node_put(platform);
+
+ cpu = NULL;
+ codec = NULL;
+ platform = NULL;
}
return 0;
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index a49bfaab6b21..2ff30b40a1e4 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -270,6 +270,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AFF")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
},
.driver_data = (void *)(SOF_SDW_TGL_HDMI |
diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h
index b238dbc9eb85..6a10ff5f5be9 100644
--- a/tools/include/linux/gfp.h
+++ b/tools/include/linux/gfp.h
@@ -3,26 +3,7 @@
#define _TOOLS_INCLUDE_LINUX_GFP_H
#include <linux/types.h>
-
-#define __GFP_BITS_SHIFT 26
-#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-#define __GFP_HIGH 0x20u
-#define __GFP_IO 0x40u
-#define __GFP_FS 0x80u
-#define __GFP_NOWARN 0x200u
-#define __GFP_ZERO 0x8000u
-#define __GFP_ATOMIC 0x80000u
-#define __GFP_ACCOUNT 0x100000u
-#define __GFP_DIRECT_RECLAIM 0x400000u
-#define __GFP_KSWAPD_RECLAIM 0x2000000u
-
-#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
-
-#define GFP_ZONEMASK 0x0fu
-#define GFP_ATOMIC (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#include <linux/gfp_types.h>
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
new file mode 100644
index 000000000000..5f9f1ed190a0
--- /dev/null
+++ b/tools/include/linux/gfp_types.h
@@ -0,0 +1 @@
+#include "../../../include/linux/gfp_types.h"
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index dfb6173b2a82..9e9a2b67de19 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -114,8 +114,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
for (i = 0; i < nsyscalls; ++i)
for (j = 0; j < expected_nr_events[i]; ++j) {
- int foo = syscalls[i]();
- ++foo;
+ syscalls[i]();
}
md = &evlist->mmap[0];
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 6a001fcfed68..4952abe716f3 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -332,7 +332,7 @@ out_delete_evlist:
out:
if (err == -EACCES)
return TEST_SKIP;
- if (err < 0)
+ if (err < 0 || errs != 0)
return TEST_FAIL;
return TEST_OK;
}
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 00c7285ce1ac..301f95427159 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -61,7 +61,7 @@ test_register_capture() {
echo "Register capture test [Skipped missing registers]"
return
fi
- if ! perf record -o - --intr-regs=di,r8,dx,cx -e cpu/br_inst_retired.near_call/p \
+ if ! perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call:p \
-c 1000 --per-thread true 2> /dev/null \
| perf script -F ip,sym,iregs -i - 2> /dev/null \
| egrep -q "DI:"
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index c644f94a6500..ec801cffae6b 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -12,7 +12,8 @@ if ! [ -x "$(command -v cc)" ]; then
fi
# skip the test if the hardware doesn't support branch stack sampling
-perf record -b -o- -B true > /dev/null 2>&1 || exit 2
+# and if the architecture doesn't support filter types: any,save_type,u
+perf record -b -o- -B --branch-filter any,save_type,u true > /dev/null 2>&1 || exit 2
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 4fd8d703ff19..8ab035b55875 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -43,10 +43,11 @@ static bool is_ignored_symbol(const char *name, char type)
/* Symbol names that begin with the following are ignored.*/
static const char * const ignored_prefixes[] = {
"$", /* local symbols for ARM, MIPS, etc. */
- ".LASANPC", /* s390 kasan local symbols */
+ ".L", /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
"__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
- "__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
+ "__kvm_nvhe_$", /* arm64 local symbols in non-VHE KVM namespace */
+ "__kvm_nvhe_.L", /* arm64 local symbols in non-VHE KVM namespace */
"__AArch64ADRPThunk_", /* arm64 lld */
"__ARMV5PILongThunk_", /* arm lld */
"__ARMV7PILongThunk_",
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9dfae1bda9cc..485e1a343165 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -269,7 +269,7 @@ CFLAGS_expr-flex.o += $(flex_flags)
bison_flags := -DYYENABLE_NLS=0
BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
ifeq ($(BISON_GE_35),1)
- bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum
+ bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum -Wno-unused-but-set-variable -Wno-unknown-warning-option
else
bison_flags += -w
endif
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 22dcfe07e886..906476a839e1 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -498,7 +498,7 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
union perf_mem_data_src data_src = { 0 };
- bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+ bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
if (record->op == ARM_SPE_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index c72f8ad96f75..9aa8cdd93de4 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -48,6 +48,7 @@ const volatile __u32 num_cpus = 1;
int enabled = 0;
int use_cgroup_v2 = 0;
+int perf_subsys_id = -1;
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
@@ -58,7 +59,15 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
int level;
int cnt;
- cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
for (cnt = 0; i < MAX_LEVELS; i++) {
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c4ba2bcf179f..38e3b287dbb2 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -94,6 +94,8 @@ const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
const volatile bool uses_cgroup_v1 = false;
+int perf_subsys_id = -1;
+
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -119,11 +121,19 @@ static inline __u64 get_cgroup_id(struct task_struct *t)
{
struct cgroup *cgrp;
- if (uses_cgroup_v1)
- cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
- else
- cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+ if (!uses_cgroup_v1)
+ return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
return BPF_CORE_READ(cgrp, kn, id);
}
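
Both BPF hunks above use the same CO-RE idiom: rather than baking in the
compile-time value of perf_event_cgrp_id (whose numeric value depends on the
target kernel's config), the subsystem index is resolved once at load time from
the running kernel's enum. A minimal sketch of the idiom, assuming vmlinux.h
and libbpf's CO-RE helpers as in perf's BPF skeletons (the helper name is
ours):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    int perf_subsys_id = -1;

    static inline struct cgroup *task_perf_cgroup(struct task_struct *t)
    {
            if (perf_subsys_id == -1) {
    #if __has_builtin(__builtin_preserve_enum_value)
                    /* Relocated by libbpf against the running kernel's enum. */
                    perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
                                                         perf_event_cgrp_id);
    #else
                    /* Older clang: fall back to the compile-time constant. */
                    perf_subsys_id = perf_event_cgrp_id;
    #endif
            }
            return BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
    }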
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index 284f8eabd3b9..7c9f9150bad5 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
* If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
*/
attr->type = type;
- attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+ attr->config = (attr->config & PERF_HW_EVENT_MASK) |
+ ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}
static int create_event_hybrid(__u32 config_type, int *idx,
@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx,
__u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type);
+
+ /*
+ * Some hybrid hardware cache events are only available on one CPU
+ * PMU. For example, the 'L1-dcache-load-misses' is only available
+ * on cpu_core, while the 'L1-icache-loads' is only available on
+ * cpu_atom. We need to remove "not supported" hybrid cache events.
+ */
+ if (attr->type == PERF_TYPE_HW_CACHE
+ && !is_event_supported(attr->type, attr->config))
+ return 0;
+
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
- if (evsel)
+ if (evsel) {
evsel->pmu_name = strdup(pmu->name);
- else
+ if (!evsel->pmu_name)
+ return -ENOMEM;
+ } else
return -ENOMEM;
-
attr->type = type;
attr->config = config;
return 0;
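
For reference, the encoding being preserved here: on hybrid systems the PMU
type id lives in the upper 32 bits of attr->config while the hardware event id
stays in the lower 32 bits, so the low bits must be masked before OR-ing in
the PMU type (otherwise a stale type could linger). A sketch of the packing,
assuming the upstream values PERF_PMU_TYPE_SHIFT == 32 and
PERF_HW_EVENT_MASK == 0xffffffff (the helper name is ours):

    #include <linux/types.h>

    #define PERF_PMU_TYPE_SHIFT 32
    #define PERF_HW_EVENT_MASK  0xffffffffULL

    static inline __u64 pack_hybrid_config(__u64 config, __u32 pmu_type)
    {
            /* Keep the low 32-bit event id, replace anything above it. */
            return (config & PERF_HW_EVENT_MASK) |
                   ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
    }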
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f05e15acd33f..f3b2c2a87456 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -28,6 +28,7 @@
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "tracepoint.h"
+#include "thread_map.h"
#define MAX_NAME_LEN 100
@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+bool is_event_supported(u8 type, u64 config)
+{
+ bool ret = true;
+ int open_return;
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ };
+ struct perf_thread_map *tmap = thread_map__new_by_tid(0);
+
+ if (tmap == NULL)
+ return false;
+
+ evsel = evsel__new(&attr);
+ if (evsel) {
+ open_return = evsel__open(evsel, NULL, tmap);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2.
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
+ evsel__delete(evsel);
+ }
+
+ perf_thread_map__put(tmap);
+ return ret;
+}
+
const char *event_type(int type)
{
switch (type) {
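
The probe works by actually opening a disabled counter on the current thread
and checking whether the kernel accepts it. For readers outside perf's
codebase, a standalone equivalent using the raw syscall (hypothetical helper,
not perf's internal API) might look like:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <errno.h>
    #include <stdbool.h>

    static bool raw_event_supported(__u32 type, __u64 config)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = config;
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
                         -1 /* any cpu */, -1 /* no group */, 0);
            if (fd < 0 && errno == EACCES) {
                    /* perf_event_paranoid == 2: retry excluding the kernel. */
                    attr.exclude_kernel = 1;
                    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            }
            if (fd < 0)
                    return false;
            close(fd);
            return true;
    }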
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e6a601d9cd0..07df7bb7b042 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -19,6 +19,7 @@ struct option;
struct perf_pmu;
bool have_tracepoints(struct list_head *evlist);
+bool is_event_supported(u8 type, u64 config);
const char *event_type(int type);
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index ba1ab5134685..c4d5d87fae2f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -22,7 +22,6 @@
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
-#include "thread_map.h"
#include "tracepoint.h"
#include "pfm.h"
#include "pmu-hybrid.h"
@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
strlist__delete(sdtlist);
}
-static bool is_event_supported(u8 type, unsigned int config)
-{
- bool ret = true;
- int open_return;
- struct evsel *evsel;
- struct perf_event_attr attr = {
- .type = type,
- .config = config,
- .disabled = 1,
- };
- struct perf_thread_map *tmap = thread_map__new_by_tid(0);
-
- if (tmap == NULL)
- return false;
-
- evsel = evsel__new(&attr);
- if (evsel) {
- open_return = evsel__open(evsel, NULL, tmap);
- ret = open_return >= 0;
-
- if (open_return == -EACCES) {
- /*
- * This happens if the paranoid value
- * /proc/sys/kernel/perf_event_paranoid is set to 2
- * Re-run with exclude_kernel set; we don't do that
- * by default as some ARM machines do not support it.
- *
- */
- evsel->core.attr.exclude_kernel = 1;
- ret = evsel__open(evsel, NULL, tmap) >= 0;
- }
- evsel__delete(evsel);
- }
-
- perf_thread_map__put(tmap);
- return ret;
-}
-
int print_hwcache_events(const char *event_glob, bool name_only)
{
unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index c92326c2233a..0f5ba28339cf 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -3,4 +3,4 @@ perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-error=deprecated-declarations
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4c122f1b1737..6448cb9f710f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -48,6 +48,8 @@ LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
+LIBKVM_STRING += lib/string_override.c
+
LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
@@ -220,7 +222,8 @@ LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
@@ -231,6 +234,12 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+# Compile the string overrides as freestanding to prevent the compiler from
+# generating self-referential code, e.g. without "freestanding" the compiler may
+# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
+$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
+
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 1c2749b1481a..76c583a07ea2 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -31,8 +31,9 @@
* These limitations are worked around in this test by using a large enough
* region of memory for each vCPU such that the number of translations cached in
* the TLB and the number of pages held in pagevecs are a small fraction of the
- * overall workload. And if either of those conditions are not true this test
- * will fail rather than silently passing.
+ * overall workload. And if either of those conditions is not true (for example
+ * under nested virtualization, where the TLB size is effectively unlimited),
+ * this test will print a warning rather than silently passing.
*/
#include <inttypes.h>
#include <limits.h>
@@ -172,17 +173,23 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
vcpu_idx, no_pfn, pages);
/*
- * Test that at least 90% of memory has been marked idle (the rest might
- * not be marked idle because the pages have not yet made it to an LRU
- * list or the translations are still cached in the TLB). 90% is
+ * Check that at least 90% of memory has been marked idle (the rest
+ * might not be marked idle because the pages have not yet made it to an
+ * LRU list or the translations are still cached in the TLB). 90% is
* arbitrary; high enough that we ensure most memory access went through
* access tracking but low enough as to not make the test too brittle
* over time and across architectures.
+ *
+ * Note that when run under nested virtualization, this check will trigger
+ * much more frequently: the TLB size is effectively unlimited, and since no
+ * flush happens, many more pages stay cached there, so the guest won't see
+ * the "idle" bit cleared.
*/
- TEST_ASSERT(still_idle < pages / 10,
- "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
- PRIu64 ").\n",
- vcpu_idx, still_idle, pages);
+ if (still_idle >= pages / 10)
+ printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
+ " out of %" PRIu64 "); this will affect performance results.\n",
+ vcpu_idx, still_idle, pages);
close(page_idle_fd);
close(pagemap_fd);
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 99fa1410964c..790c6d1ecb34 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -617,6 +617,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size);
+bool kvm_vm_has_ept(struct kvm_vm *vm);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 71ade6100fd3..2bd25b191d15 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -22,7 +22,7 @@ static void test_dump_stack(void)
* Build and run this command:
*
* addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \
- * grep -v test_dump_stack | cat -n 1>&2
+ * cat -n 1>&2
*
* Note that the spacing is different and there's no newline.
*/
@@ -36,18 +36,24 @@ static void test_dump_stack(void)
n * (((sizeof(void *)) * 2) + 1) +
/* Null terminator: */
1];
- char *c;
+ char *c = cmd;
n = backtrace(stack, n);
- c = &cmd[0];
- c += sprintf(c, "%s", addr2line);
/*
- * Skip the first 3 frames: backtrace, test_dump_stack, and
- * test_assert. We hope that backtrace isn't inlined and the other two
- * we've declared noinline.
+ * Skip the first 2 frames, which should be test_dump_stack() and
+ * test_assert(), both of which are declared noinline. Bail if the
+ * resulting stack trace would be empty; otherwise, addr2line will block
+ * waiting for addresses to be passed in via stdin.
*/
+ if (n <= 2) {
+ fputs(" (stack trace empty)\n", stderr);
+ return;
+ }
+
+ c += sprintf(c, "%s", addr2line);
for (i = 2; i < n; i++)
c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1);
+
c += sprintf(c, "%s", pipeline);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
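
The frame-skipping above relies on glibc's backtrace(), where index 0 is the
innermost frame (the caller of backtrace() itself). A minimal standalone
illustration of the same idea (hypothetical helper, glibc execinfo assumed):

    #include <execinfo.h>
    #include <stdio.h>

    static void dump_callers(void)
    {
            void *stack[16];
            int i, n = backtrace(stack, 16);

            for (i = 1; i < n; i++) /* skip dump_callers() itself */
                    fprintf(stderr, "  %p\n", stack[i]);
    }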
diff --git a/tools/testing/selftests/kvm/lib/string_override.c b/tools/testing/selftests/kvm/lib/string_override.c
new file mode 100644
index 000000000000..632398adc229
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/string_override.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+
+/*
+ * Override the "basic" built-in string helpers so that they can be used in
+ * guest code. KVM selftests don't support dynamic loading in guest code and
+ * will jump into the weeds if the compiler decides to insert an out-of-line
+ * call via the PLT.
+ */
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
+ if ((res = *su1 - *su2) != 0)
+ break;
+ }
+ return res;
+}
+
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 80a568c439b8..d21049c38fc5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018, Google LLC.
*/
+#include <asm/msr-index.h>
+
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
@@ -542,9 +544,27 @@ void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}
+bool kvm_vm_has_ept(struct kvm_vm *vm)
+{
+ struct kvm_vcpu *vcpu;
+ uint64_t ctrl;
+
+ vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
+ if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ return false;
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
+ return ctrl & SECONDARY_EXEC_ENABLE_EPT;
+}
+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
+ TEST_REQUIRE(kvm_vm_has_ept(vm));
+
vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
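
kvm_vm_has_ept() relies on the SDM-defined layout of the VMX capability MSRs:
bits 31:0 report the "allowed-0" settings and bits 63:32 the "allowed-1"
settings, so a control can be enabled only if its allowed-1 bit is set. The
decode, as a sketch (hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool vmx_ctrl_can_be_set(uint64_t cap_msr, uint32_t ctrl)
    {
            /* High 32 bits of a VMX capability MSR = "allowed-1" controls. */
            return (uint32_t)(cap_msr >> 32) & ctrl;
    }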
diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
index b1905d280ef5..e0004bd26536 100644
--- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
@@ -14,6 +14,9 @@
#include "kvm_util.h"
#include "processor.h"
+/* VMCALL and VMMCALL are both 3-byte opcodes. */
+#define HYPERCALL_INSN_SIZE 3
+
static bool ud_expected;
static void guest_ud_handler(struct ex_regs *regs)
@@ -22,7 +25,7 @@ static void guest_ud_handler(struct ex_regs *regs)
GUEST_DONE();
}
-extern unsigned char svm_hypercall_insn;
+extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
@@ -39,7 +42,7 @@ static uint64_t svm_do_sched_yield(uint8_t apic_id)
return ret;
}
-extern unsigned char vmx_hypercall_insn;
+extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
@@ -56,30 +59,20 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id)
return ret;
}
-static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
-{
- uint32_t exp = 0, obs = 0;
-
- memcpy(&exp, exp_insn, sizeof(exp));
- memcpy(&obs, obs_insn, sizeof(obs));
-
- GUEST_ASSERT_EQ(exp, obs);
-}
-
static void guest_main(void)
{
- unsigned char *native_hypercall_insn, *hypercall_insn;
+ uint8_t *native_hypercall_insn, *hypercall_insn;
uint8_t apic_id;
apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
if (is_intel_cpu()) {
- native_hypercall_insn = &vmx_hypercall_insn;
- hypercall_insn = &svm_hypercall_insn;
+ native_hypercall_insn = vmx_hypercall_insn;
+ hypercall_insn = svm_hypercall_insn;
svm_do_sched_yield(apic_id);
} else if (is_amd_cpu()) {
- native_hypercall_insn = &svm_hypercall_insn;
- hypercall_insn = &vmx_hypercall_insn;
+ native_hypercall_insn = svm_hypercall_insn;
+ hypercall_insn = vmx_hypercall_insn;
vmx_do_sched_yield(apic_id);
} else {
GUEST_ASSERT(0);
@@ -87,8 +80,13 @@ static void guest_main(void)
return;
}
+ /*
+ * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
+ * occurs). Verify that a #UD is NOT expected and that KVM patched in
+ * the native hypercall.
+ */
GUEST_ASSERT(!ud_expected);
- assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
+ GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
GUEST_DONE();
}
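
For reference, the two 3-byte encodings that the memcmp() compares, per the
Intel and AMD manuals (VMCALL is the VMX instruction, VMMCALL the SVM one):

    #include <stdint.h>

    static const uint8_t vmcall_insn_bytes[3]  = { 0x0f, 0x01, 0xc1 }; /* VMCALL */
    static const uint8_t vmmcall_insn_bytes[3] = { 0x0f, 0x01, 0xd9 }; /* VMMCALL */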
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 072d709c96b4..65aea27d761c 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -328,7 +328,7 @@ static void test_extra_filter(const struct test_params p)
if (bind(fd1, addr, sockaddr_size()))
error(1, errno, "failed to bind recv socket 1");
- if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE)
+ if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE)
error(1, errno, "bind socket 2 should fail with EADDRINUSE");
free(addr);
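
The corrected condition errors out both when bind() unexpectedly succeeds and
when it fails for any reason other than the address being in use. Written out
long-hand (an equivalent sketch reusing the test's helpers, not the patch
itself):

    int ret = bind(fd2, addr, sockaddr_size());

    if (ret == 0)
            error(1, 0, "bind socket 2 unexpectedly succeeded");
    else if (errno != EADDRINUSE)
            error(1, errno, "bind socket 2 failed, but not with EADDRINUSE");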
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 363b98228301..5d3440f474dd 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -14,6 +14,7 @@ struct virtio_device {
u64 features;
struct list_head vqs;
spinlock_t vqs_list_lock;
+ const struct virtio_config_ops *config;
};
struct virtqueue {
@@ -23,7 +24,9 @@ struct virtqueue {
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
+ unsigned int num_max;
void *priv;
+ bool reset;
};
/* Interfaces exported by virtio_ring. */
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index f2640e505c4e..2a8a70e2a950 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -3,6 +3,11 @@
#include <linux/virtio.h>
#include <uapi/linux/virtio_config.h>
+struct virtio_config_ops {
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
+};
+
/*
* __virtio_test_bit - helper to test feature bits. For use by transports.
* Devices should normally use virtio_has_feature,